autoconf-2.71/0000755000000000000000000000000014004625653010235 500000000000000autoconf-2.71/bin/0000755000000000000000000000000014004625653011005 500000000000000autoconf-2.71/bin/local.mk0000644000000000000000000000551114004621270012341 00000000000000# Make Autoconf commands. # Copyright (C) 1999-2007, 2009-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . bin_SCRIPTS = \ bin/autoconf \ bin/autoheader \ bin/autom4te \ bin/autoreconf \ bin/autoscan \ bin/autoupdate \ bin/ifnames EXTRA_DIST += \ bin/autoconf.as \ bin/autoheader.in \ bin/autom4te.in \ bin/autoreconf.in \ bin/autoscan.in \ bin/autoupdate.in \ bin/ifnames.in # Files that should be removed, but which Automake does not know. MOSTLYCLEANFILES += $(bin_SCRIPTS) bin/autoconf.in bin/*.tmp ## ------------- ## ## The scripts. ## ## ------------- ## # autoconf is written in M4sh. # FIXME: this target should depend on the frozen files below lib/m4sugar, # otherwise autom4te may pick up a frozen m4sh.m4f from an earlier # installation below the same $(prefix); work around this with --melt. bin/autoconf.in: $(srcdir)/bin/autoconf.as $(m4sh_m4f_dependencies) $(MY_AUTOM4TE) --language M4sh --cache '' \ --melt $(srcdir)/bin/autoconf.as -o $@ ## All the scripts depend on Makefile so that they are rebuilt when the ## prefix etc. changes. It took quite a while to have the rule correct, ## don't break it! 
## Use chmod -w to prevent people from editing the wrong file by accident. $(bin_SCRIPTS): Makefile rm -f $@ $@.tmp $(MKDIR_P) $(@D) srcdir=''; \ test -f ./$@.in || srcdir=$(srcdir)/; \ $(edit) $${srcdir}$@.in >$@.tmp chmod +x $@.tmp chmod a-w $@.tmp mv $@.tmp $@ bin/autoconf: bin/autoconf.in bin/autoheader: $(srcdir)/bin/autoheader.in bin/autom4te: $(srcdir)/bin/autom4te.in bin/autoreconf: $(srcdir)/bin/autoreconf.in bin/autoscan: $(srcdir)/bin/autoscan.in bin/autoupdate: $(srcdir)/bin/autoupdate.in bin/ifnames: $(srcdir)/bin/ifnames.in ## --------------- ## ## Building TAGS. ## ## --------------- ## TAGS_DEPENDENCIES = $(EXTRA_DIST) letters = abcdefghijklmnopqrstuvwxyz LETTERS = ABCDEFGHIJKLMNOPQRSTUVWXYZ DIGITS = 0123456789 WORD_REGEXP = [$(LETTERS)$(letters)_][$(LETTERS)$(letters)$(DIGITS)_]* ETAGS_PERL = --lang=perl \ bin/autoheader.in \ bin/autoreconf.in \ bin/autoupdate.in \ bin/autoscan.in \ bin/autom4te.in \ bin/ifnames.in ETAGS_SH = --lang=none --regex='/\($(WORD_REGEXP)\)=/\1/' \ bin/autoconf.in ETAGS_ARGS += $(ETAGS_PERL) $(ETAGS_SH) autoconf-2.71/bin/autoconf.as0000644000000000000000000001562714004621270013072 00000000000000AS_INIT[]dnl -*- shell-script -*- m4_divert_push([HEADER-COPYRIGHT])dnl # @configure_input@ # autoconf -- create 'configure' using m4 macros. # Copyright (C) 1992-1994, 1996, 1999-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
m4_divert_pop([HEADER-COPYRIGHT])dnl back to BODY AS_ME_PREPARE[]dnl help=["\ Usage: $0 [OPTION]... [TEMPLATE-FILE] Generate a configuration script from a TEMPLATE-FILE if given, or 'configure.ac' if present, or else 'configure.in'. Output is sent to the standard output if TEMPLATE-FILE is given, else into 'configure'. Operation modes: -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don't remove temporary files -f, --force consider all files obsolete -o, --output=FILE save output in FILE (stdout is the default) -W, --warnings=CATEGORY report the warnings falling in CATEGORY Warning categories include: cross cross compilation issues gnu GNU coding standards (default in gnu and gnits modes) obsolete obsolete features or constructions (default) override user redefinitions of Automake rules or variables portability portability issues (default in gnu and gnits modes) portability-recursive nested Make variables (default with -Wportability) extra-portability extra portability issues related to obscure tools syntax dubious syntactic constructs (default) unsupported unsupported or incomplete features (default) all all the warnings no-CATEGORY turn off warnings in CATEGORY none turn off all the warnings The environment variables 'M4' and 'WARNINGS' are honored. Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path Tracing: -t, --trace=MACRO[:FORMAT] report the list of calls to MACRO -i, --initialization also trace Autoconf's initialization process In tracing mode, no configuration script is created. FORMAT defaults to '\$f:\$l:\$n:\$%'; see 'autom4te --help' for information about FORMAT. Report bugs to . GNU Autoconf home page: . General help using GNU software: ."] version=["\ autoconf (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. 
License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by David J. MacKenzie and Akim Demaille."] usage_err="\ Try '$as_me --help' for more information." exit_missing_arg=' m4_bpatsubst([AS_ERROR([option '$[1]' requires an argument$as_nl$usage_err])], ['], ['\\''])' # restore font-lock: ' # Variables. : ${AUTOM4TE='@bindir@/@autom4te-name@'} : ${trailer_m4='@pkgdatadir@/autoconf/trailer.m4'} autom4te_options= outfile= verbose=false # Parse command line. while test $# -gt 0 ; do option=[`expr "x$1" : 'x\(--[^=]*\)' \| \ "x$1" : 'x\(-.\)'`] optarg=[`expr "x$1" : 'x--[^=]*=\(.*\)' \| \ "x$1" : 'x-.\(.*\)'`] case $1 in --version | -V ) AS_ECHO(["$version"]); exit ;; --help | -h ) AS_ECHO(["$help"]); exit ;; --verbose | -v ) verbose=: autom4te_options="$autom4te_options $1"; shift ;; # Arguments passed as is to autom4te. --debug | -d | \ --force | -f | \ --include=* | -I?* | \ --prepend-include=* | -B?* | \ --warnings=* | -W?* ) case $1 in *\'*) arg=`AS_ECHO(["$1"]) | sed "s/'/'\\\\\\\\''/g"` ;; #' *) arg=$1 ;; esac autom4te_options="$autom4te_options '$arg'"; shift ;; # Options with separated arg passed as is to autom4te. 
--include | -I | \ --prepend-include | -B | \ --warnings | -W ) test $# = 1 && eval "$exit_missing_arg" case $2 in *\'*) arg=`AS_ECHO(["$2"]) | sed "s/'/'\\\\\\\\''/g"` ;; #' *) arg=$2 ;; esac autom4te_options="$autom4te_options $option '$arg'" shift; shift ;; --trace=* | -t?* ) traces="$traces --trace='"`AS_ECHO(["$optarg"]) | sed "s/'/'\\\\\\\\''/g"`"'" shift ;; --trace | -t ) test $# = 1 && eval "$exit_missing_arg" traces="$traces --trace='"`AS_ECHO(["$[2]"]) | sed "s/'/'\\\\\\\\''/g"`"'" shift; shift ;; --initialization | -i ) autom4te_options="$autom4te_options --melt" shift;; --output=* | -o?* ) outfile=$optarg shift ;; --output | -o ) test $# = 1 && eval "$exit_missing_arg" outfile=$2 shift; shift ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) exec >&2 AS_ERROR([invalid option '$[1]'$as_nl$usage_err]) ;; * ) break ;; esac done # Find the input file. case $# in 0) if test -f configure.ac; then if test -f configure.in; then AS_ECHO(["$as_me: warning: both 'configure.ac' and 'configure.in' are present."]) >&2 AS_ECHO(["$as_me: warning: proceeding with 'configure.ac'."]) >&2 fi infile=configure.ac elif test -f configure.in; then infile=configure.in else AS_ERROR([no input file]) fi test -z "$traces" && test -z "$outfile" && outfile=configure;; 1) infile=$1 ;; *) exec >&2 AS_ERROR([invalid number of arguments$as_nl$usage_err]) ;; esac # Unless specified, the output is stdout. test -z "$outfile" && outfile=- # Don't read trailer.m4 if we are tracing. if test -n "$traces"; then trailer_m4="" else # The extra quotes will be stripped by eval. trailer_m4=\""$trailer_m4"\" fi # Run autom4te with expansion. # trailer.m4 is read _before_ $infile, despite the name, # because putting it afterward screws up autom4te's location tracing. 
eval set x "$autom4te_options" \ --language=autoconf --output=\"\$outfile\" "$traces" \ $trailer_m4 \"\$infile\" shift $verbose && AS_ECHO(["$as_me: running $AUTOM4TE $*"]) >&2 exec "$AUTOM4TE" "$@" autoconf-2.71/bin/autoheader.in0000644000000000000000000002144114004621270013367 00000000000000#! @PERL@ # -*- Perl -*- # @configure_input@ eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac' if 0; # autoheader -- create 'config.h.in' from 'configure.ac'. # Copyright (C) 1992-1994, 1996, 1998-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Written by Roland McGrath. # Rewritten in Perl by Akim Demaille. use 5.006; use strict; use warnings FATAL => 'all'; BEGIN { my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@'; unshift @INC, "$pkgdatadir"; # Override SHELL. On DJGPP SHELL may not be set to a shell # that can handle redirection and quote arguments correctly, # e.g.: COMMAND.COM. For DJGPP always use the shell that configure # has detected. $ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos'); } use Autom4te::ChannelDefs; use Autom4te::Channels; use Autom4te::Configure_ac; use Autom4te::FileUtils; use Autom4te::General; use Autom4te::XFile; # These vars must be package globals so they can be accessed by code # evaluated via 'do FILE', below. our ($config_h, %symbol, %verbatim); # Lib files. 
my $autom4te = $ENV{'AUTOM4TE'} || '@bindir@/@autom4te-name@'; my $config_h_in; my @prepend_include; my @include; my @warnings; # $HELP # ----- $help = "Usage: $0 [OPTION]... [TEMPLATE-FILE] Create a template file of C '\#define' statements for 'configure' to use. To this end, scan TEMPLATE-FILE, or 'configure.ac' if present, or else 'configure.in'. -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don\'t remove temporary files -f, --force consider all files obsolete -W, --warnings=CATEGORY report the warnings falling in CATEGORY " . Autom4te::ChannelDefs::usage . " Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path Report bugs to . GNU Autoconf home page: . General help using GNU software: . "; # $VERSION # -------- $version = "autoheader (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by Roland McGrath and Akim Demaille. "; ## ---------- ## ## Routines. ## ## ---------- ## # parse_args () # ------------- # Process any command line arguments. sub parse_args () { my $srcdir; getopt ('I|include=s' => \@include, 'B|prepend-include=s' => \@prepend_include, 'W|warnings=s' => \@warnings); parse_WARNINGS; parse_warnings @warnings; if (! @ARGV) { my $configure_ac = require_configure_ac; push @ARGV, $configure_ac; } } ## -------------- ## ## Main program. ## ## -------------- ## mktmpdir ('ah'); parse_args; # Preach. 
my $config_h_top = find_file ("config.h.top?", reverse (@prepend_include), @include); my $config_h_bot = find_file ("config.h.bot?", reverse (@prepend_include), @include); my $acconfig_h = find_file ("acconfig.h?", reverse (@prepend_include), @include); if ($config_h_top || $config_h_bot || $acconfig_h) { my $msg = << "END"; Using auxiliary files such as 'acconfig.h', 'config.h.bot' and 'config.h.top', to define templates for 'config.h.in' is deprecated and discouraged. Using the third argument of 'AC_DEFINE_UNQUOTED' and 'AC_DEFINE' allows one to define a template without 'acconfig.h': AC_DEFINE([NEED_FUNC_MAIN], 1, [Define if a function 'main' is needed.]) More sophisticated templates can also be produced, see the documentation. END $msg =~ s/^ /WARNING: /gm; msg 'obsolete', $msg; } # Set up autoconf. my $autoconf = "'$autom4te' --language=autoconf "; $autoconf .= join (' --include=', '', map { shell_quote ($_) } @include); $autoconf .= join (' --prepend-include=', '', map { shell_quote ($_) } @prepend_include); $autoconf .= ' --debug' if $debug; $autoconf .= ' --force' if $force; $autoconf .= ' --verbose' if $verbose; # ----------------------- # # Real work starts here. # # ----------------------- # # Source what the traces are trying to tell us. verb "$me: running $autoconf to trace from $ARGV[0]"; my $quoted_tmp = shell_quote ($tmp); { # Suppress all warnings from the subsidiary autoconf invocation. local $ENV{WARNINGS} = 'none'; xsystem ("$autoconf" # If you change this list, update the # 'Autoheader-preselections' section of autom4te.in. . ' --trace AC_CONFIG_HEADERS:\'$$config_h ||= \'"\'"\'$1\'"\'"\';\'' . ' --trace AH_OUTPUT:\'$$verbatim{\'"\'"\'$1\'"\'"\'} = \'"\'"\'$2\'"\'"\';\'' . ' --trace AC_DEFINE_TRACE_LITERAL:\'$$symbol{\'"\'"\'$1\'"\'"\'} = 1;\'' . " " . shell_quote ($ARGV[0]) . " >$quoted_tmp/traces.pl"); } local (%verbatim, %symbol); debug "$me: 'do'ing $tmp/traces.pl:\n" . 
`sed 's/^/| /' $quoted_tmp/traces.pl`; do "$tmp/traces.pl"; warn "couldn't parse $tmp/traces.pl: $@" if $@; unless ($config_h) { error "error: AC_CONFIG_HEADERS not found in $ARGV[0]"; exit 1; } # Support "outfile[:infile]", defaulting infile="outfile.in". sub templates_for_header { my ($spec) = @_; my ($header, @templates) = split(':', $spec); return @templates if @templates; return $header . '.in'; } my @config_templates = map(templates_for_header($_), split(' ', $config_h)); # We template only the first CONFIG_HEADER. $config_h_in = shift(@config_templates); $config_h =~ s/[ :].*//; # %SYMBOL might contain things like 'F77_FUNC(name,NAME)', but we keep # only the name of the macro. %symbol = map { s/\(.*//; $_ => 1 } keys %symbol; my $out = new Autom4te::XFile ("$tmp/config.hin", ">"); # Don't write "do not edit" -- it will get copied into the # config.h, which it's ok to edit. print $out "/* $config_h_in. Generated from $ARGV[0] by autoheader. */\n"; # Dump the top. if ($config_h_top) { my $in = new Autom4te::XFile ($config_h_top, "<"); while ($_ = $in->getline) { print $out $_; } } # Dump 'acconfig.h', except for its bottom portion. if ($acconfig_h) { my $in = new Autom4te::XFile ($acconfig_h, "<"); while ($_ = $in->getline) { last if /\@BOTTOM\@/; next if /\@TOP\@/; print $out $_; } } # Dump the templates from 'configure.ac'. foreach (sort keys %verbatim) { print $out "\n$verbatim{$_}\n"; } # Dump bottom portion of 'acconfig.h'. if ($acconfig_h) { my $in = new Autom4te::XFile ($acconfig_h, "<"); my $dump = 0; while ($_ = $in->getline) { print $out $_ if $dump; $dump = 1 if /\@BOTTOM\@/; } } # Dump the bottom. if ($config_h_bot) { my $in = new Autom4te::XFile ($config_h_bot, "<"); while ($_ = $in->getline) { print $out $_; } } $out->close; # Check that all the symbols have a template. { foreach my $template ("$tmp/config.hin", @config_templates) { if (! 
-r $template) { msg 'syntax', "cannot read $template: $!"; } else { my $in = new Autom4te::XFile ($template, "<"); while ($_ = $in->getline) { my ($sym) = /^\#\s*\w+\s+(\w+)/ or next; delete $symbol{$sym}; } } } my $suggest_ac_define = 1; foreach (sort keys %symbol) { msg 'syntax', "missing template: $_"; if ($suggest_ac_define) { msg 'syntax', "Use AC_DEFINE([$_], [], [Description])"; $suggest_ac_define = 0; } } exit 1 if keys %symbol; } update_file ("$tmp/config.hin", "$config_h_in", $force); ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/bin/autom4te.in0000644000000000000000000010124014004621270013004 00000000000000#! @PERL@ # -*- perl -*- # @configure_input@ eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac' if 0; # autom4te - Wrapper around M4 libraries. # Copyright (C) 2001-2003, 2005-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
use 5.006; use strict; use warnings FATAL => 'all'; BEGIN { my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@'; unshift @INC, $pkgdatadir; # Override SHELL. On DJGPP SHELL may not be set to a shell # that can handle redirection and quote arguments correctly, # e.g.: COMMAND.COM. For DJGPP always use the shell that configure # has detected. $ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos'); } use File::Basename; use Autom4te::C4che; use Autom4te::ChannelDefs; use Autom4te::Channels; use Autom4te::FileUtils; use Autom4te::General; use Autom4te::XFile; # Data directory. my $pkgdatadir = $ENV{'AC_MACRODIR'} || '@pkgdatadir@'; # $LANGUAGE{LANGUAGE} -- Automatic options for LANGUAGE. my %language; my $output = '-'; # Mode of the output file except for traces. my $mode = "0666"; # If melt, don't use frozen files. my $melt = 0; # Names of the cache directory, cache directory index, trace cache # prefix, and output cache prefix. And the IO object for the index. my $cache; my $icache; my $tcache; my $ocache; my $icache_file; my $flock_implemented = '@PERL_FLOCK@'; # The macros to trace mapped to their format, as specified by the # user. my %trace; # The macros the user will want to trace in the future. # We need 'include' to get the included file, 'm4_pattern_forbid' and # 'm4_pattern_allow' to check the output. # # FIXME: What about 'sinclude'? my @preselect = ('include', 'm4_pattern_allow', 'm4_pattern_forbid', '_m4_warn'); # M4 include path. my @include; # Do we freeze? my $freeze = 0; # $M4. my $m4 = $ENV{"M4"} || '@M4@'; # Some non-GNU m4's don't reject the --help option, so give them /dev/null. fatal "need GNU m4 1.4 or later: $m4" if system "$m4 --help &1 | grep reload-state >/dev/null"; # Set some high recursion limit as the default limit, 250, has already # been hit with AC_OUTPUT. Don't override the user's choice. $m4 .= ' --nesting-limit=1024' if " $m4 " !~ / (--nesting-limit(=[0-9]+)?|-L[0-9]*) /; # @M4_BUILTIN -- M4 builtins and a useful comment. 
my @m4_builtin = `echo dumpdef | $m4 2>&1 >/dev/null`; map { s/:.*//;s/\W// } @m4_builtin; # %M4_BUILTIN_ALTERNATE_NAME # -------------------------- # The builtins are renamed, e.g., 'define' is renamed 'm4_define'. # So map 'define' to 'm4_define' and conversely. # Some macros don't follow this scheme: be sure to properly map to their # alternate name too. # # FIXME: Trace status of renamed builtins was fixed in M4 1.4.5, which # we now depend on; do we still need to do this mapping? # # So we will merge them, i.e., tracing 'BUILTIN' or tracing # 'm4_BUILTIN' will be the same: tracing both, but honoring the # *last* trace specification. # # FIXME: This is not enough: in the output '$0' will be 'BUILTIN' # sometimes and 'm4_BUILTIN' at others. We should return a unique name, # the one specified by the user. # # FIXME: To be absolutely rigorous, I would say that given that we # _redefine_ divert (instead of _copying_ it), divert and the like # should not be part of this list. my %m4_builtin_alternate_name; @m4_builtin_alternate_name{"$_", "m4_$_"} = ("m4_$_", "$_") foreach (grep { !/m4wrap|m4exit|dnl|ifelse|__.*__/ } @m4_builtin); @m4_builtin_alternate_name{"ifelse", "m4_if"} = ("m4_if", "ifelse"); @m4_builtin_alternate_name{"m4exit", "m4_exit"} = ("m4_exit", "m4exit"); @m4_builtin_alternate_name{"m4wrap", "m4_wrap"} = ("m4_wrap", "m4wrap"); # $HELP # ----- $help = "Usage: $0 [OPTION]... [FILES] Run GNU M4 on the FILES, avoiding useless runs. Output the traces if tracing, the frozen file if freezing, otherwise the expansion of the FILES. If some of the FILES are named 'FILE.m4f' they are considered to be M4 frozen files of all the previous files (which are therefore not loaded). If 'FILE.m4f' is not found, then 'FILE.m4' will be used, together with all the previous files. Some files may be optional, i.e., will only be processed if found in the include path, but then must end in '.m4?'; the question mark is not part of the actual file name. 
Operation modes: -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don't remove temporary files -o, --output=FILE save output in FILE (defaults to '-', stdout) -f, --force don't rely on cached values -W, --warnings=CATEGORY report the warnings falling in CATEGORY -l, --language=LANG specify the set of M4 macros to use -C, --cache=DIRECTORY preserve results for future runs in DIRECTORY --no-cache disable the cache -m, --mode=OCTAL change the non trace output file mode (0666) -M, --melt don't use M4 frozen files Languages include: 'Autoconf' create Autoconf configure scripts 'Autotest' create Autotest test suites 'M4sh' create M4sh shell scripts 'M4sugar' create M4sugar output " . Autom4te::ChannelDefs::usage . " The environment variables 'M4' and 'WARNINGS' are honored. Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path Tracing: -t, --trace=MACRO[:FORMAT] report the MACRO invocations -p, --preselect=MACRO prepare to trace MACRO in a future run Freezing: -F, --freeze produce an M4 frozen state file for FILES FORMAT defaults to '\$f:\$l:\$n:\$%', and can use the following escapes: \$\$ literal \$ \$f file where macro was called \$l line where macro was called \$d nesting depth of macro call \$n name of the macro \$NUM argument NUM, unquoted and with newlines \$SEP\@ all arguments, with newlines, quoted, and separated by SEP \$SEP* all arguments, with newlines, unquoted, and separated by SEP \$SEP% all arguments, without newlines, unquoted, and separated by SEP SEP can be empty for the default (comma for \@ and *, colon for %), a single character for that character, or {STRING} to use a string. Report bugs to . GNU Autoconf home page: . General help using GNU software: . 
"; # $VERSION # -------- $version = "autom4te (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by Akim Demaille. "; ## ---------- ## ## Routines. ## ## ---------- ## # $OPTION # files_to_options (@FILE) # ------------------------ # Transform Autom4te conventions (e.g., using foo.m4f to designate a frozen # file) into a suitable command line for M4 (e.g., using --reload-state). # parse_args guarantees that we will see at most one frozen file, and that # if a frozen file is present, it is the first argument. sub files_to_options (@) { my (@file) = @_; my @res; foreach my $file (@file) { my $arg = shell_quote ($file); if ($file =~ /\.m4f$/) { $arg = "--reload-state=$arg"; # If the user downgraded M4 from 1.6 to 1.4.x after freezing # the file, then we ensure the frozen __m4_version__ will # not cause m4_init to make the wrong decision about the # current M4 version. $arg .= " --undefine=__m4_version__" unless grep {/__m4_version__/} @m4_builtin; } push @res, $arg; } return join ' ', @res; } # load_configuration ($FILE) # -------------------------- # Load the configuration $FILE. sub load_configuration ($) { my ($file) = @_; use Text::ParseWords; my $cfg = new Autom4te::XFile ($file, "<"); my $lang; while ($_ = $cfg->getline) { chomp; # Comments. 
next if /^\s*(\#.*)?$/; my @words = shellwords ($_); my $type = shift @words; if ($type eq 'begin-language:') { fatal "$file:$.: end-language missing for: $lang" if defined $lang; $lang = lc $words[0]; } elsif ($type eq 'end-language:') { error "$file:$.: end-language mismatch: $lang" if $lang ne lc $words[0]; $lang = undef; } elsif ($type eq 'args:') { fatal "$file:$.: no current language" unless defined $lang; push @{$language{$lang}}, @words; } else { error "$file:$.: unknown directive: $type"; } } } # parse_args () # ------------- # Process any command line arguments. sub parse_args () { # We want to look for the early options, which should not be found # in the configuration file. Prepend to the user arguments. # Perform this repeatedly so that we can use --language in language # definitions. Beware that there can be several --language # invocations. my @language; do { @language = (); use Getopt::Long; Getopt::Long::Configure ("pass_through", "permute"); GetOptions ("l|language=s" => \@language); foreach (@language) { error "unknown language: $_" unless exists $language{lc $_}; unshift @ARGV, @{$language{lc $_}}; } } while @language; # --debug is useless: it is parsed below. if (exists $ENV{'AUTOM4TE_DEBUG'}) { print STDERR "$me: concrete arguments:\n"; foreach my $arg (@ARGV) { print STDERR "| $arg\n"; } } # Process the arguments for real this time. my @trace; my @prepend_include; my @warnings; getopt ( # Operation modes: "o|output=s" => \$output, "W|warnings=s" => \@warnings, "m|mode=s" => \$mode, "M|melt" => \$melt, # Library directories: "B|prepend-include=s" => \@prepend_include, "I|include=s" => \@include, # Tracing: # Using a hash for traces is seducing. Unfortunately, upon '-t FOO', # instead of mapping 'FOO' to undef, Getopt maps it to '1', preventing # us from distinguishing '-t FOO' from '-t FOO=1'. So let's do it # by hand. "t|trace=s" => \@trace, "p|preselect=s" => \@preselect, # Freezing. "F|freeze" => \$freeze, # Caching. 
"C|cache=s" => \$cache, "no-cache" => sub { $cache = undef; }, ); parse_WARNINGS; parse_warnings @warnings; fatal "too few arguments Try '$me --help' for more information." unless @ARGV; # Freezing: # We cannot trace at the same time (well, we can, but it sounds insane). # And it implies melting: there is risk not to update properly using # old frozen files, and worse yet: we could load a frozen file and # refreeze it! A sort of caching :) fatal "cannot freeze and trace" if $freeze && @trace; $melt = 1 if $freeze; # Names of the cache directory, cache directory index, trace cache # prefix, and output cache prefix. If the cache is not to be # preserved, default to a temporary directory (automatically removed # on exit). $cache = $tmp unless $cache; $icache = "$cache/requests"; $tcache = "$cache/traces."; $ocache = "$cache/output."; # Normalize the includes: the first occurrence is enough, several is # a pain since it introduces a useless difference in the path which # invalidates the cache. And strip '.' which is implicit and always # first. @include = grep { !/^\.$/ } uniq (reverse(@prepend_include), @include); # Convert @trace to %trace, and work around the M4 builtins tracing # problem. # The default format is '$f:$l:$n:$%'. foreach (@trace) { /^([^:]+)(?::(.*))?$/ms; $trace{$1} = defined $2 ? $2 : '$f:$l:$n:$%'; $trace{$m4_builtin_alternate_name{$1}} = $trace{$1} if exists $m4_builtin_alternate_name{$1}; } # Work around the M4 builtins tracing problem for @PRESELECT. # FIXME: Is this still needed, now that we rely on M4 1.4.5? push (@preselect, map { $m4_builtin_alternate_name{$_} } grep { exists $m4_builtin_alternate_name{$_} } @preselect); # If we find frozen files, then all the files before it are # discarded: the frozen file is supposed to include them all. # # We don't want to depend upon m4's --include to find the top level # files, so we use 'find_file' here. Try to get a canonical name, # as it's part of the key for caching. 
And some files are optional # (also handled by 'find_file'). my @argv; foreach (@ARGV) { if ($_ eq '-') { push @argv, $_; } elsif (/\.m4f$/) { # Frozen files are optional => pass a '?' to 'find_file'. my $file = find_file ("$_?", @include); if (!$melt && $file) { @argv = ($file); } else { s/\.m4f$/.m4/; push @argv, find_file ($_, @include); } } else { my $file = find_file ($_, @include); push @argv, $file if $file; } } @ARGV = @argv; } # handle_m4 ($REQ, @MACRO) # ------------------------ # Run m4 on the input files, and save the traces on the @MACRO. sub handle_m4 ($@) { my ($req, @macro) = @_; # GNU m4 appends when using --debugfile/--error-output. unlink ($tcache . $req->id . "t"); # Run m4. # # We don't output directly to the cache files, to avoid problems # when we are interrupted (that leaves corrupted files). xsystem ("$m4 @M4_GNU@" . join (' --include=', '', map { shell_quote ($_) } @include) . ' --debug=aflq' . (!exists $ENV{'AUTOM4TE_NO_FATAL'} ? ' --fatal-warning' : '') . " @M4_DEBUGFILE@=" . shell_quote ("$tcache" . $req->id . "t") . join (' --trace=', '', map { shell_quote ($_) } sort @macro) . " " . files_to_options (@ARGV) . " > " . shell_quote ("$ocache" . $req->id . "t")); # Everything went ok: preserve the outputs. foreach my $file (map { $_ . $req->id } ($tcache, $ocache)) { use File::Copy; move ("${file}t", "$file") or fatal "cannot rename ${file}t as $file: $!"; } } # warn_forbidden ($WHERE, $WORD, %FORBIDDEN) # ------------------------------------------ # $WORD is forbidden. Warn with a dedicated error message if in # %FORBIDDEN, otherwise a simple 'error: possibly undefined macro' # will do. 
my $first_warn_forbidden = 1; sub warn_forbidden ($$%) { my ($where, $word, %forbidden) = @_; my $message; for my $re (sort keys %forbidden) { if ($word =~ $re) { $message = $forbidden{$re}; last; } } $message ||= "possibly undefined macro: $word"; warn "$where: error: $message\n"; if ($first_warn_forbidden) { warn < 'forbid:$1:$2', 'm4_pattern_allow' => 'allow:$1')); my @patterns = new Autom4te::XFile ("$tmp/patterns", "<")->getlines; chomp @patterns; my %forbidden = map { /^forbid:([^:]+):.+$/ => /^forbid:[^:]+:(.+)$/ } @patterns; my $forbidden = join ('|', map { /^forbid:([^:]+)/ } @patterns) || "^\$"; my $allowed = join ('|', map { /^allow:([^:]+)/ } @patterns) || "^\$"; verb "forbidden tokens: $forbidden"; verb "forbidden token : $_ => $forbidden{$_}" foreach (sort keys %forbidden); verb "allowed tokens: $allowed"; # Read the (cached) raw M4 output, produce the actual result. # If we are writing to a regular file, replace it atomically. my $scratchfile; my $out; if ($output eq '-') { # Don't just make $out be STDOUT, because then we would close STDOUT, # which we already do in END. $out = new Autom4te::XFile ('>&STDOUT'); } elsif (-e $output && ! -f $output) { $out = new Autom4te::XFile ($output, '>'); } else { my (undef, $outdir, undef) = fileparse ($output); use File::Temp qw (tempfile); ($out, $scratchfile) = tempfile (UNLINK => 0, DIR => $outdir); fatal "cannot create a file in $outdir: $!" unless $out; # File::Temp doesn't give us access to 3-arg open(2), unfortunately. chmod (oct ($mode) & ~(umask), $scratchfile) or fatal "setting mode of " . $scratchfile . ": $!"; } my $in = new Autom4te::XFile ($ocache . $req->id, "<"); my %prohibited; my $res; while ($_ = $in->getline) { s/\s+$//; s/__oline__/$./g; s/\@<:\@/[/g; s/\@:>\@/]/g; s/\@\{:\@/(/g; s/\@:\}\@/)/g; s/\@S\|\@/\$/g; s/\@%:\@/#/g; $res = $_; # Don't complain in comments. Well, until we have something # better, don't consider '#include' etc. are comments. 
s/\#.*// unless /^\#\s*(if|include|endif|ifdef|ifndef|define)\b/; foreach (split (/\W+/)) { $prohibited{$_} = $. if !/^$/ && /$forbidden/o && !/$allowed/o && ! exists $prohibited{$_}; } # Performed *last*: the empty quadrigraph. $res =~ s/\@&t\@//g; print $out "$res\n"; } $out->close(); # Always update the file, even if it didn't change; # Automake relies on this. update_file ($scratchfile, $output, 1) if defined $scratchfile; # If no forbidden words, we're done. return if ! %prohibited; # Locate the forbidden words in the last input file. # This is unsatisfying but... $exit_code = 1; if ($ARGV[$#ARGV] ne '-') { my $prohibited = '\b(' . join ('|', keys %prohibited) . ')\b'; my $file = new Autom4te::XFile ($ARGV[$#ARGV], "<"); while ($_ = $file->getline) { # Don't complain in comments. Well, until we have something # better, don't consider '#include' etc. to be comments. s/\#.*// unless /^\#(if|include|endif|ifdef|ifndef|define)\b/; # Complain once per word, but possibly several times per line. while (/$prohibited/) { my $word = $1; warn_forbidden ("$ARGV[$#ARGV]:$.", $word, %forbidden); delete $prohibited{$word}; # If we're done, exit. return if ! %prohibited; $prohibited = '\b(' . join ('|', keys %prohibited) . ')\b'; } } } warn_forbidden ("$output:$prohibited{$_}", $_, %forbidden) foreach (sort { $prohibited{$a} <=> $prohibited{$b} } keys %prohibited); } ## --------------------- ## ## Handling the traces. ## ## --------------------- ## # $M4_MACRO # trace_format_to_m4 ($FORMAT) # ---------------------------- # Convert a trace $FORMAT into a M4 trace processing macro's body. sub trace_format_to_m4 ($) { my ($format) = @_; my $underscore = $_; my %escape = (# File name. 'f' => '$1', # Line number. 'l' => '$2', # Depth. 'd' => '$3', # Name (also available as $0). 'n' => '$4', # Escaped dollar. '$' => '$'); my $res = ''; $_ = $format; while ($_) { # $n -> $(n + 4) if (s/^\$(\d+)//) { $res .= "\$" . ($1 + 4); } # $x, no separator given. 
elsif (s/^\$([fldn\$])//) { $res .= $escape{$1}; } # $.x or ${sep}x. elsif (s/^\$\{([^}]*)\}([@*%])// || s/^\$(.?)([@*%])//) { # $@, list of quoted effective arguments. if ($2 eq '@') { $res .= ']at_at([' . ($1 ? $1 : ',') . '], $@)['; } # $*, list of unquoted effective arguments. elsif ($2 eq '*') { $res .= ']at_star([' . ($1 ? $1 : ',') . '], $@)['; } # $%, list of flattened unquoted effective arguments. elsif ($2 eq '%') { $res .= ']at_percent([' . ($1 ? $1 : ':') . '], $@)['; } } elsif (/^(\$.)/) { error "invalid escape: $1"; } else { s/^([^\$]+)//; $res .= $1; } } $_ = $underscore; return '[[' . $res . ']]'; } # handle_traces($REQ, $OUTPUT, %TRACE) # ------------------------------------ # We use M4 itself to process the traces. But to avoid name clashes when # processing the traces, the builtins are disabled, and moved into 'at_'. # Actually, all the low level processing macros are in 'at_' (and '_at_'). # To avoid clashes between user macros and 'at_' macros, the macros which # implement tracing are in 'AT_'. # # Having $REQ is needed to neutralize the macros which have been traced, # but are not wanted now. sub handle_traces ($$%) { my ($req, $output, %trace) = @_; verb "formatting traces for '$output': " . join (', ', sort keys %trace); # Processing the traces. my $trace_m4 = new Autom4te::XFile ("$tmp/traces.m4", ">"); $_ = <<'EOF'; divert(-1) changequote([, ]) # _at_MODE(SEPARATOR, ELT1, ELT2...) # ---------------------------------- # List the elements, separating then with SEPARATOR. # MODE can be: # 'at' -- the elements are enclosed in brackets. # 'star' -- the elements are listed as are. # 'percent' -- the elements are 'flattened': spaces are singled out, # and no new line remains. 
define([_at_at], [at_ifelse([$#], [1], [], [$#], [2], [[[$2]]], [[[$2]][$1]$0([$1], at_shift(at_shift($@)))])]) define([_at_percent], [at_ifelse([$#], [1], [], [$#], [2], [at_flatten([$2])], [at_flatten([$2])[$1]$0([$1], at_shift(at_shift($@)))])]) define([_at_star], [at_ifelse([$#], [1], [], [$#], [2], [[$2]], [[$2][$1]$0([$1], at_shift(at_shift($@)))])]) # FLATTEN quotes its result. # Note that the second pattern is 'newline, tab or space'. Don't lose # the tab! define([at_flatten], [at_patsubst(at_patsubst([[[$1]]], [\\\n]), [[\n\t ]+], [ ])]) define([at_args], [at_shift(at_shift(at_shift(at_shift(at_shift($@)))))]) define([at_at], [_$0([$1], at_args($@))]) define([at_percent], [_$0([$1], at_args($@))]) define([at_star], [_$0([$1], at_args($@))]) EOF s/^ //mg;s/\\t/\t/mg;s/\\n/\n/mg; print $trace_m4 $_; # If you trace 'define', then on 'define([m4_exit], defn([m4exit])' you # will produce # # AT_define([m4sugar.m4], [115], [1], [define], [m4_exit], ) # # Since '' is not quoted, the outer m4, when processing # 'trace.m4' will exit prematurely. Hence, move all the builtins to # the 'at_' name space. print $trace_m4 "# Copy the builtins.\n"; map { print $trace_m4 "define([at_$_], defn([$_]))\n" } @m4_builtin; print $trace_m4 "\n"; print $trace_m4 "# Disable them.\n"; map { print $trace_m4 "at_undefine([$_])\n" } @m4_builtin; print $trace_m4 "\n"; # Neutralize traces: we don't want traces of cached requests (%REQUEST). print $trace_m4 "## -------------------------------------- ##\n", "## By default neutralize all the traces. ##\n", "## -------------------------------------- ##\n", "\n"; print $trace_m4 "at_define([AT_$_], [at_dnl])\n" foreach (sort keys %{$req->macro}); print $trace_m4 "\n"; # Implement traces for current requests (%TRACE). print $trace_m4 "## ------------------------- ##\n", "## Trace processing macros. ##\n", "## ------------------------- ##\n", "\n"; foreach (sort keys %trace) { # Trace request can be embed \n. 
(my $comment = "Trace $_:$trace{$_}") =~ s/^/\# /; print $trace_m4 "$comment\n"; print $trace_m4 "at_define([AT_$_],\n"; print $trace_m4 trace_format_to_m4 ($trace{$_}) . ")\n\n"; } print $trace_m4 "\n"; # Reenable output. print $trace_m4 "at_divert(0)at_dnl\n"; # Transform the traces from m4 into an m4 input file. # Typically, transform: # # | m4trace:configure.ac:3: -1- AC_SUBST([exec_prefix], [NONE]) # # into # # | AT_AC_SUBST([configure.ac], [3], [1], [AC_SUBST], [exec_prefix], [NONE]) # # Pay attention that the file name might include colons, if under DOS # for instance, so we don't use '[^:]+'. my $traces = new Autom4te::XFile ($tcache . $req->id, "<"); while ($_ = $traces->getline) { # Trace with arguments, as the example above. We don't try # to match the trailing parenthesis as it might be on a # separate line. s{^m4trace:(.+):(\d+): -(\d+)- ([^(]+)\((.*)$} {AT_$4([$1], [$2], [$3], [$4], $5}; # Traces without arguments, always on a single line. s{^m4trace:(.+):(\d+): -(\d+)- ([^)]*)\n$} {AT_$4([$1], [$2], [$3], [$4])\n}; print $trace_m4 "$_"; } $trace_m4->close; my $in = new Autom4te::XFile ("$m4 " . shell_quote ("$tmp/traces.m4") . " |"); my $out = new Autom4te::XFile; if ($output eq '-') { $out->open (">$output"); } else { $out->open ($output, ">"); } # This is dubious: should we really transform the quadrigraphs in # traces? It might break balanced [ ] etc. in the output. The # consensus seems to be that traces are more useful this way. while ($_ = $in->getline) { # It makes no sense to try to transform __oline__. s/\@<:\@/[/g; s/\@:>\@/]/g; s/\@\{:\@/(/g; s/\@:\}\@/)/g; s/\@S\|\@/\$/g; s/\@%:\@/#/g; s/\@&t\@//g; print $out $_; } } # $BOOL # up_to_date ($REQ) # ----------------- # Are the cache files of $REQ up to date? # $REQ is 'valid' if it corresponds to the request and exists, which # does not mean it is up to date. It is up to date if, in addition, # its files are younger than its dependencies. sub up_to_date ($) { my ($req) = @_; return 0 if ! 
$req->valid; my $tfile = $tcache . $req->id; my $ofile = $ocache . $req->id; # We can't answer properly if the traces are not computed since we # need to know what other files were included. Actually, if any of # the cache files is missing, we are not up to date. return 0 if ! -f $tfile || ! -f $ofile; # The youngest of the cache files must be older than the oldest of # the dependencies. # FIXME: These timestamps have only 1-second resolution. # Time::HiRes fixes this, but assumes Perl 5.8 or later. my $tmtime = mtime ($tfile); my $omtime = mtime ($ofile); my ($file, $mtime) = ($tmtime < $omtime ? ($ofile, $omtime) : ($tfile, $tmtime)); # stdin is always out of date. if (grep { $_ eq '-' } @ARGV) { return 0 } # We depend at least upon the arguments. foreach my $dep (@ARGV) { if ($mtime < mtime ($dep)) { verb "up_to_date ($file): outdated: $dep"; return 0; } } # Files may include others. We can use traces since we just checked # if they are available. handle_traces ($req, "$tmp/dependencies", ('include' => '$1', 'm4_include' => '$1')); my $deps = new Autom4te::XFile ("$tmp/dependencies", "<"); while ($_ = $deps->getline) { chomp; my $dep = find_file ("$_?", @include); # If a file which used to be included is no longer there, then # don't say it's missing (it might no longer be included). But # of course, that causes the output to be outdated (as if the # timestamp of that missing file was newer). return 0 if ! $dep; if ($mtime < mtime ($dep)) { verb "up_to_date ($file): outdated: $dep"; return 0; } } verb "up_to_date ($file): up to date"; return 1; } ## ---------- ## ## Freezing. ## ## ---------- ## # freeze ($OUTPUT) # ---------------- sub freeze ($) { my ($output) = @_; # When processing the file with diversion disabled, there must be no # output but comments and empty lines. my $result = xqx ("$m4" . ' --fatal-warning' . join (' --include=', '', map { shell_quote ($_) } @include) . ' --define=divert' . " " . files_to_options (@ARGV) . 
' lock (LOCK_EX) if ($flock_implemented eq "yes"); # Read the cache index if available and older than autom4te itself. # If autom4te is younger, then some structures such as C4che might # have changed, which would corrupt its processing. Autom4te::C4che->load ($icache_file) if (-f $icache && mtime ($icache) > mtime ($0) && Autom4te::C4che->good_version ($icache_file, '@VERSION@')); # Add the new trace requests. my $req = Autom4te::C4che->request ('input' => \@ARGV, 'path' => \@include, 'macro' => [keys %trace, @preselect]); # If $REQ's cache files are not up to date, or simply if the user # discarded them (-f), declare it invalid. $req->valid (0) if $force || ! up_to_date ($req); # We now know whether we can trust the Request object. Say it. verb "the trace request object is:\n" . $req->marshall; # We need to run M4 if (i) the user wants it (--force), (ii) $REQ is # invalid. handle_m4 ($req, keys %{$req->macro}) if $force || ! $req->valid; # Issue the warnings each time autom4te was run. my $separator = "\n" . ('-' x 25) . " END OF WARNING " . ('-' x 25) . "\n\n"; handle_traces ($req, "$tmp/warnings", ('_m4_warn' => "\$1::\$f:\$l::\$2::\$3$separator")); # Swallow excessive newlines. for (split (/\n*$separator\n*/o, contents ("$tmp/warnings"))) { # The message looks like: # | syntax::input.as:5::ouch # | ::input.as:4: baz is expanded from... # | input.as:2: bar is expanded from... # | input.as:3: foo is expanded from... # | input.as:5: the top level # In particular, m4_warn guarantees that either $stackdump is empty, or # it consists of lines where only the last line ends in "top level". my ($cat, $loc, $msg, $stacktrace) = split ('::', $_, 4); # There might not have been a stacktrace. $stacktrace = '' unless defined $stacktrace; msg $cat, $loc, $msg, partial => ($stacktrace =~ /top level$/) + 0; for (split /\n/, $stacktrace) { my ($loc, $trace) = split (': ', $_, 2); msg $cat, $loc, $trace, partial => ($trace !~ /top level$/) + 0; } } # Now output... 
if (%trace) { # Always produce traces, since even if the output is young enough, # there is no guarantee that the traces use the same *format* # (e.g., '-t FOO:foo' and '-t FOO:bar' are both using the same M4 # traces, hence the M4 traces cache is usable, but its formatting # will yield different results). handle_traces ($req, $output, %trace); } else { # Actual M4 expansion, if the user wants it, or if $output is old # (STDOUT is pretty old). handle_output ($req, $output) if $force || mtime ($output) < mtime ($ocache . $req->id); } # If we ran up to here, the cache is valid. $req->valid (1); Autom4te::C4che->save ($icache_file, '@VERSION@'); exit $exit_code; ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/bin/autoreconf.in0000644000000000000000000006443314004621270013423 00000000000000#! @PERL@ # -*- perl -*- # @configure_input@ eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac' if 0; # autoreconf - install the GNU Build System in a directory tree # Copyright (C) 1994, 1999-2017, 2020-2021 Free Software Foundation, # Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Written by David J. MacKenzie. # Extended and rewritten in Perl by Akim Demaille. use 5.006; use strict; use warnings FATAL => 'all'; my $buildauxdir; BEGIN { my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@'; unshift @INC, $pkgdatadir; $buildauxdir = $ENV{'autom4te_buildauxdir'} || $pkgdatadir . '/build-aux'; # Override SHELL. On DJGPP SHELL may not be set to a shell # that can handle redirection and quote arguments correctly, # e.g.: COMMAND.COM. For DJGPP always use the shell that configure # has detected. $ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos'); } # Do not use Cwd::chdir, since it might hang. use Cwd qw (cwd); use File::Copy qw (copy); use File::Temp qw (tempfile); use Autom4te::ChannelDefs; use Autom4te::Channels; use Autom4te::Configure_ac; use Autom4te::FileUtils; use Autom4te::General; use Autom4te::XFile; ## ----------- ## ## Variables. ## ## ----------- ## # $HELP # ----- $help = "Usage: $0 [OPTION]... [DIRECTORY]... Run 'autoconf' and, when needed, 'aclocal', 'autoheader', 'automake', 'autopoint' (formerly 'gettextize'), 'libtoolize', 'intltoolize', and 'gtkdocize' to regenerate the GNU Build System files in specified DIRECTORIES and their subdirectories (defaulting to '.'). By default, it only remakes those files that are older than their sources. If you install new versions of the GNU Build System, you can make 'autoreconf' remake all of the files by giving it the '--force' option. 
Operation modes: -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don't remove temporary files -f, --force consider all generated and standard files obsolete -i, --install copy missing standard auxiliary files --no-recursive don't rebuild sub-packages -s, --symlink with -i, install symbolic links instead of copies -m, --make when applicable, re-run ./configure && make -W, --warnings=CATEGORY report the warnings falling in CATEGORY [syntax] " . Autom4te::ChannelDefs::usage . " The environment variable 'WARNINGS' is honored. Some subtools might support other warning types, using 'all' is encouraged. Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path The environment variables AUTOCONF, ACLOCAL, AUTOHEADER, AUTOM4TE, AUTOMAKE, AUTOPOINT, GTKDOCIZE, INTLTOOLIZE, LIBTOOLIZE, M4, and MAKE are honored. Report bugs to . GNU Autoconf home page: . General help using GNU software: . "; # $VERSION # -------- $version = "autoreconf (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by David J. MacKenzie and Akim Demaille. "; # Lib files. 
my $autoconf = $ENV{'AUTOCONF'} || '@bindir@/@autoconf-name@'; my $autoheader = $ENV{'AUTOHEADER'} || '@bindir@/@autoheader-name@'; my $autom4te = $ENV{'AUTOM4TE'} || '@bindir@/@autom4te-name@'; my $automake = $ENV{'AUTOMAKE'} || 'automake'; my $aclocal = $ENV{'ACLOCAL'} || 'aclocal'; my $libtoolize = $ENV{'LIBTOOLIZE'} || 'libtoolize'; my $intltoolize = $ENV{'INTLTOOLIZE'} || 'intltoolize'; my $gtkdocize = $ENV{'GTKDOCIZE'} || 'gtkdocize'; my $autopoint = $ENV{'AUTOPOINT'} || 'autopoint'; my $make = $ENV{'MAKE'} || 'make'; # --install -- as --add-missing in other tools. my $install = 0; # symlink -- when --install, use symlinks instead. my $symlink = 0; my @prepend_include; my @include; # Rerun './configure && make'? my $run_make = 0; # Recurse into subpackages my $recursive = 1; ## ---------- ## ## Routines. ## ## ---------- ## # parse_args () # ------------- # Process any command line arguments. sub parse_args () { my $srcdir; # List of command line warning requests. my @warning; getopt ("W|warnings=s" => \@warning, 'I|include=s' => \@include, 'B|prepend-include=s' => \@prepend_include, 'i|install' => \$install, 's|symlink' => \$symlink, 'm|make' => \$run_make, 'recursive!' => \$recursive); # Split the warnings as a list of elements instead of a list of # lists. @warning = map { split /,/ } @warning; parse_WARNINGS; parse_warnings @warning; # Even if the user specified a configure.ac, trim to get the # directory, and look for configure.ac again. Because (i) the code # is simpler, and (ii) we are still able to diagnose simultaneous # presence of configure.ac and configure.in. @ARGV = map { /configure\.(ac|in)$/ ? dirname ($_) : $_ } @ARGV; push @ARGV, '.' unless @ARGV; if ($verbose && $debug) { for my $prog ($autoconf, $autoheader, $automake, $aclocal, $autopoint, $libtoolize, $intltoolize, $gtkdocize) { xsystem ("$prog --version | sed 1q >&2"); print STDERR "\n"; } } # Dispatch autoreconf's option to the tools. 
# --include; $aclocal .= join (' -I ', '', map { shell_quote ($_) } @include); $autoconf .= join (' --include=', '', map { shell_quote ($_) } @include); $autoconf .= join (' --prepend-include=', '', map { shell_quote ($_) } @prepend_include); $autoheader .= join (' --include=', '', map { shell_quote ($_) } @include); $autoheader .= join (' --prepend-include=', '', map { shell_quote ($_) } @prepend_include); # --install and --symlink; if ($install) { $automake .= ' --add-missing'; $automake .= ' --copy' unless $symlink; $libtoolize .= ' --copy' unless $symlink; $intltoolize .= ' --copy' unless $symlink; $gtkdocize .= ' --copy' unless $symlink; } # --force; if ($force) { $aclocal .= ' --force'; $autoconf .= ' --force'; $autoheader .= ' --force'; $automake .= ' --force-missing'; $autopoint .= ' --force'; $libtoolize .= ' --force'; $intltoolize .= ' --force'; } else { $automake .= ' --no-force'; } # --verbose --verbose or --debug; if ($verbose > 1 || $debug) { $autoconf .= ' --verbose'; $autoheader .= ' --verbose'; $automake .= ' --verbose'; $aclocal .= ' --verbose'; } if ($debug) { $autoconf .= ' --debug'; $autoheader .= ' --debug'; $libtoolize .= ' --debug'; } # Pass down warnings via the WARNINGS environment variable, instead # of via --warnings, so that unrecognized warning categories are # silently ignored. We already issued diagnostics about warning # categories *we* don't recognize; older subsidiary tools may not # know all of them, and may treat unrecognized warning categories on # the command line as a fatal error when -Werror is in effect. $ENV{WARNINGS} = merge_WARNINGS @warning; verb "export WARNINGS=$ENV{WARNINGS}"; } ## ----------------------- ## ## Handling of aux files. ## ## ----------------------- ## # find_missing_aux_files # ---------------------- # Look in $aux_dir (or, if that is empty, ., .., and ../..) for all of the # files in @$aux_files; return a list of those that do not exist. 
sub find_missing_aux_files { my ($aux_files, $aux_dir) = @_; my @aux_dirs; if ($aux_dir) { push @aux_dirs, $aux_dir; } else { @aux_dirs = qw(. .. ../..); } # If we find all the aux files in _some_ directory in @aux_dirs, we're # good. But if we don't find all of them in any directory in @aux_dirs, # return the set of missing files from the _first_ directory in @aux_dirs; # this will be less confusing in the common case where AC_CONFIG_AUX_DIR # wasn't used and the parent directories don't provide any aux files. my @missing_aux_files; my @first_missing_aux_files; for my $dir (@aux_dirs) { @missing_aux_files = (); for my $file (@{$aux_files}) { push @missing_aux_files, $file unless -e "${dir}/${file}"; } return () if !@missing_aux_files; @first_missing_aux_files = @missing_aux_files unless @first_missing_aux_files; } return @first_missing_aux_files; } # can_install_aux_files # --------------------- # Report whether all of the files listed in @_ exist in $buildauxdir, # which means we could install them. sub can_install_aux_files { local $_; for (@_) { return 0 unless -f "${buildauxdir}/$_"; } return 1; } # extract_time_stamp ($fname) # --------------------------- # Extract a timestamp line from $fname. # This is hardwired to know what to look for in the files we currently install. sub extract_time_stamp { my $fname = shift; open my $fh, '<', $fname or fatal "opening $fname: $!"; while (my $l = <$fh>) { if ($l =~ /^(?:scriptversion|timestamp)='?(\d\d\d\d-\d\d-\d\d(?:\.\d\d)?)/) { return $1; } } # Old versions of install-sh did not have a timestamp line. return '1970-01-01'; } # our_aux_file_is_newer ($dest, $src) # ----------------------------------- # True if our copy of an aux file ($src) has a newer 'timestamp' line # than the matching line in $dest. 
sub our_aux_file_is_newer { my ($dest, $src) = @_; my $dstamp = extract_time_stamp ($dest); my $sstamp = extract_time_stamp ($src); return $sstamp gt $dstamp; } # try_install_aux_files # --------------------- # Install each of the aux files listed in @$auxfiles, that we are able # to install, into $destdir. # Remove the files we were able to install from @$auxfiles. sub try_install_aux_files { my ($auxfiles, $destdir) = @_; my @unable; for my $f (@$auxfiles) { my $src = "${buildauxdir}/$f"; if (-f $src) { install_aux_file ($destdir, $f, $src); } else { push @unable, $f; } } @$auxfiles = @unable; } # install_aux_file # ---------------- # Install the file $src as $destdir/$f, honoring --symlink and --force. sub install_aux_file { my ($destdir, $f, $src) = @_; my $dest = "${destdir}/$f"; if (-e $dest && ! our_aux_file_is_newer ($dest, $src)) { return; } if ($symlink) { if ($force || ! -l $dest || readlink $dest != $src) { if (-e $dest) { unlink $dest or fatal "rm -f $dest: $!"; } verb "linking $dest to $src"; symlink $src, $dest or fatal "ln -s $src $dest: $!"; } } else { if (-e $dest && ! -f $dest) { unlink $dest or fatal "rm -f $dest: $!"; } my ($temp, $tempname) = tempfile (UNLINK => 0, DIR => $destdir); copy ($src, $tempname) or fatal "copying $src to $tempname: $!"; make_executable ($tempname) if -x $src; update_file ($tempname, $dest, $force); } } # make_executable # --------------- # Make the file $f be executable by all users it is currently readable by. sub make_executable { my $f = shift; my $perm = (stat $f)[2] & 07777; $perm |= 0100 if ($perm & 0400); $perm |= 0010 if ($perm & 0040); $perm |= 0001 if ($perm & 0004); chmod $perm, $f or fatal "chmod $f: $!"; } ## -------------------------- ## ## Per-directory operations. 
## ## -------------------------- ## # &autoreconf_current_directory # ----------------------------- sub autoreconf_current_directory ($) { my ($directory) = @_; my $configure_ac = find_configure_ac; # ---------------------- # # Is it using Autoconf? # # ---------------------- # my $uses_autoconf; my $uses_gettext; if (-f $configure_ac) { my $configure_ac_file = new Autom4te::XFile ("$configure_ac", "<"); while ($_ = $configure_ac_file->getline) { s/#.*//; s/dnl.*//; $uses_autoconf = 1 if /AC_INIT/; # See below for why we look for gettext here. $uses_gettext = 1 if /^AM_GNU_GETTEXT_(?:REQUIRE_)?VERSION/; } if (!$uses_autoconf) { error "$configure_ac: AC_INIT not found; not an autoconf script?"; return; } } else { verb "neither configure.ac nor configure.in present in $directory"; return; } # ------------------- # # Running autopoint. # # ------------------- # # Gettext is a bit of a problem: its macros are not necessarily # visible to aclocal, so if we start with a completely stripped down # package (think of a fresh CVS checkout), running 'aclocal' first # will fail: the Gettext macros are missing. # # Therefore, we can't use the traces to decide if we use Gettext or # not. I guess that once Gettext move to 2.5x we will be able to, # but in the meanwhile forget it. # # We can only grep for AM_GNU_GETTEXT_(REQUIRE_)?VERSION in configure.ac. # You might think this approach is naive, and indeed it is, as it prevents # one to embed AM_GNU_GETTEXT_(REQUIRE_)?VERSION in another *.m4, but # anyway we don't limit the generality, since... that's what autopoint does. # Actually, it is even more restrictive, as it greps for # '^AM_GNU_GETTEXT_(REQUIRE_)?VERSION('. We did this above, while # scanning configure.ac. 
if (!$uses_gettext) { verb "$configure_ac: not using Gettext"; } elsif (!$install) { verb "$configure_ac: not running autopoint: --install not given"; } else { xsystem_hint ("autopoint is needed because this package uses Gettext", $autopoint); } # ----------------- # # Running aclocal. # # ----------------- # # Run it first: it might discover new macros to add, e.g., # AC_PROG_LIBTOOL, which we will trace later to see if Libtool is # used. # # Always run it. Tracking its sources for up-to-dateness is too # complex and too error prone. The best we can do is avoiding # nuking the timestamp. my $uses_aclocal = 1; # Nevertheless, if aclocal.m4 exists and is not made by aclocal, # don't run aclocal. if (-f 'aclocal.m4') { my $aclocal_m4 = new Autom4te::XFile 'aclocal.m4'; $_ = $aclocal_m4->getline; $uses_aclocal = 0 unless defined ($_) && /generated.*by aclocal/; } # If there are flags for aclocal in Makefile.am, use them. my $aclocal_flags = ''; if ($uses_aclocal && -f 'Makefile.am') { my $makefile = new Autom4te::XFile 'Makefile.am'; while ($_ = $makefile->getline) { if (/^ACLOCAL_[A-Z_]*FLAGS\s*=\s*(.*)/) { $aclocal_flags = $1; last; } } } if (!$uses_aclocal) { verb "$configure_ac: not using aclocal"; } else { # Some file systems have sub-second timestamps, and if so we may # run into trouble later, after we rerun autoconf and set the # timestamps of input files to be no greater than aclocal.m4, # because the time-stamp-setting operation (utime) has a # resolution of only 1 second. Work around the problem by # ensuring that there is at least a one-second window before the # timestamp of aclocal.m4t in which no file timestamps can # fall. sleep 1; xsystem ("$aclocal $aclocal_flags"); } # We might have to rerun aclocal if Libtool (or others) imports new # macros. my $rerun_aclocal = 0; # ------------------------------- # # See what tools will be needed. 
# # ------------------------------- # # Perform a single trace reading to avoid --force forcing a rerun # between two --trace, that's useless. If there is no AC_INIT, then # it's not an Autoconf script; ignore it. # Suppress all warnings from this invocation; they may be spurious # due to out-of-date files, and in any case they'll duplicate warnings # from the final autoconf invocation. my $aux_dir; my @aux_files; my $uses_gettext_via_traces; my $uses_libtool; my $uses_intltool; my $uses_gtkdoc; my $uses_libltdl; my $uses_autoheader; my $uses_automake; my @subdir; my $traces; verb "$configure_ac: tracing"; { local $ENV{WARNINGS} = 'none'; $traces = new Autom4te::XFile ("$autoconf" . join (' ', map { ' --trace=' . $_ . ':\$n::\${::}%' } # If you change this list, update the # 'Autoreconf-preselections' section of autom4te.in. 'AC_CONFIG_AUX_DIR', 'AC_CONFIG_HEADERS', 'AC_CONFIG_SUBDIRS', 'AC_INIT', 'AC_REQUIRE_AUX_FILE', 'AC_PROG_LIBTOOL', 'AM_PROG_LIBTOOL', 'LT_INIT', 'LT_CONFIG_LTDL_DIR', 'AM_GNU_GETTEXT', 'AM_INIT_AUTOMAKE', 'GTK_DOC_CHECK', 'IT_PROG_INTLTOOL', ) . ' |'); } while ($_ = $traces->getline) { chomp; my ($macro, @args) = split (/::/); $aux_dir = $args[0] if $macro eq "AC_CONFIG_AUX_DIR"; push @aux_files, $args[0] if $macro eq "AC_REQUIRE_AUX_FILE"; $uses_autoconf = 1 if $macro eq "AC_INIT"; $uses_gettext_via_traces = 1 if $macro eq "AM_GNU_GETTEXT"; $uses_libtool = 1 if $macro eq "AC_PROG_LIBTOOL" || $macro eq "AM_PROG_LIBTOOL" || $macro eq "LT_INIT"; $uses_libltdl = 1 if $macro eq "LT_CONFIG_LTDL_DIR"; $uses_autoheader = 1 if $macro eq "AC_CONFIG_HEADERS"; $uses_automake = 1 if $macro eq "AM_INIT_AUTOMAKE"; $uses_intltool = 1 if $macro eq "IT_PROG_INTLTOOL"; $uses_gtkdoc = 1 if $macro eq "GTK_DOC_CHECK"; push @subdir, split (' ', $args[0] || '') if $macro eq "AC_CONFIG_SUBDIRS" && $recursive; } $traces->close; # The subdirs are *optional*, they may not exist. 
foreach (@subdir) { if (-d) { verb "$configure_ac: adding subdirectory $_ to autoreconf"; autoreconf ($_); } else { verb "$configure_ac: subdirectory $_ not present"; } } # Gettext consistency checks. # Some projects intentionally don't call AM_GNU_GETTEXT_(REQUIRE_)VERSION # because they have all of the gettext infrastructure checked into version # control and they want us to _not_ run autopoint. Therefore, these # diagnostics are only warnings. msg('syntax', $configure_ac, "AM_GNU_GETTEXT is used, but not AM_GNU_GETTEXT_VERSION" . " or AM_GNU_GETTEXT_REQUIRE_VERSION") if $uses_gettext_via_traces && ! $uses_gettext; msg('syntax', $configure_ac, "AM_GNU_GETTEXT_VERSION or AM_GNU_GETTEXT_REQUIRE_VERSION is used," . " but not AM_GNU_GETTEXT") if $uses_gettext && ! $uses_gettext_via_traces; # ---------------------------- # # Setting up the source tree. # # ---------------------------- # # libtoolize, automake --add-missing etc. will drop files in the # $AUX_DIR. But these tools fail to install these files if the # directory itself does not exist, which valid: just imagine a CVS # repository with hand written code only (there is not even a need # for a Makefile.am!). if ($install && defined $aux_dir && ! -d $aux_dir) { verb "$configure_ac: creating directory $aux_dir"; mkdir $aux_dir, 0755 or error "cannot create $aux_dir: $!"; } # -------------------- # # Running libtoolize. # # -------------------- # if (!$uses_libtool) { verb "$configure_ac: not using Libtool"; } elsif ($install) { if ($uses_libltdl) { $libtoolize .= " --ltdl"; } xsystem_hint ("libtoolize is needed because this package uses Libtool", $libtoolize); $rerun_aclocal = 1; } else { verb "$configure_ac: not running libtoolize: --install not given"; } # --------------------- # # Running intltoolize. 
# # --------------------- # if (!$uses_intltool) { verb "$configure_ac: not using Intltool"; } elsif ($install) { xsystem_hint ("intltoolize is needed because this package uses Intltool", $intltoolize); } else { verb "$configure_ac: not running intltool: --install not given"; } # ------------------- # # Running gtkdocize. # # ------------------- # if (!$uses_gtkdoc) { verb "$configure_ac: not using Gtkdoc"; } elsif ($install) { xsystem_hint ("gtkdocize is needed because this package uses Gtkdoc", $gtkdocize); } else { verb "$configure_ac: not running gtkdocize: --install not given"; } # ------------------- # # Rerunning aclocal. # # ------------------- # # If we re-installed Libtool or Gettext, the macros might have changed. # Automake also needs an up-to-date aclocal.m4. if ($rerun_aclocal) { if (!$uses_aclocal) { verb "$configure_ac: not using aclocal"; } else { xsystem ("$aclocal $aclocal_flags"); } } # ------------------ # # Running autoconf. # # ------------------ # # Don't try to be smarter than 'autoconf', which does its own up to # date checks. # # We prefer running autoconf before autoheader, because (i) the # latter runs the former, and (ii) autoconf is stricter than # autoheader. So all in all, autoconf should give better error # messages. xsystem ($autoconf); # -------------------- # # Running autoheader. # # -------------------- # # We now consider that if AC_CONFIG_HEADERS is used, then autoheader # is used too. # # Just as for autoconf, up to date ness is performed by the tool # itself. # # Run it before automake, since the latter checks the presence of # config.h.in when it sees an AC_CONFIG_HEADERS. if (!$uses_autoheader) { verb "$configure_ac: not using Autoheader"; } else { xsystem ($autoheader); } # ------------------ # # Running automake. # # ------------------ # if (!$uses_automake) { verb "$configure_ac: not using Automake"; } else { # We should always run automake, and let it decide whether it shall # update the file or not. 
In fact, the effect of '$force' is already # included in '$automake' via '--no-force'. xsystem ($automake); } # ---------------------------------------------------- # # Installing aux files and checking for missing ones. # # ---------------------------------------------------- # try_install_aux_files (\@aux_files, $aux_dir || '.') if $install && $force; my @missing_aux_files = find_missing_aux_files (\@aux_files, $aux_dir); if (@missing_aux_files) { try_install_aux_files (\@missing_aux_files, $aux_dir || '.') if $install && !$force; for (0 .. $#missing_aux_files) { my $f = $missing_aux_files[$_]; if ($_ == $#missing_aux_files) { # Offer some advice if --install wasn't given and has a # chance of helping. my $trailer = ""; $trailer = "\n try running autoreconf --install" if (!$install && ($uses_automake || $uses_libtool || $uses_intltool || $uses_gtkdoc || can_install_aux_files @missing_aux_files)); error $configure_ac, "required file '$f' not found$trailer"; } else { error $configure_ac, "required file '$f' not found"; } } } # -------------- # # Running make. # # -------------- # if ($run_make) { if (!-f "config.status") { verb "no config.status: cannot re-make"; } else { xsystem ("./config.status --recheck"); xsystem ("./config.status"); if (!-f "Makefile") { verb "no Makefile: cannot re-make"; } else { xsystem ("$make"); } } } } # &autoreconf ($DIRECTORY) # ------------------------ # Reconf the $DIRECTORY. sub autoreconf ($) { my ($directory) = @_; my $cwd = cwd; # The format for this message is not free: taken from Emacs, itself # using GNU Make's format. verb "Entering directory '$directory'"; chdir $directory or error "cannot chdir to $directory: $!"; autoreconf_current_directory ($directory); # The format is not free: taken from Emacs, itself using GNU Make's # format. verb "Leaving directory '$directory'"; chdir $cwd or error "cannot chdir to $cwd: $!"; } ## ------ ## ## Main. 
## ## ------ ## # When debugging, it is convenient that all the related temporary # files be at the same place. mktmpdir ('ar'); $ENV{'TMPDIR'} = $tmp; parse_args; # Autoreconf all the given configure.ac. Unless '--no-recursive' is passed, # AC_CONFIG_SUBDIRS will be traversed in &autoreconf_current_directory. $ENV{'AUTOM4TE'} = $autom4te; for my $directory (@ARGV) { require_configure_ac ($directory); autoreconf ($directory); } exit $exit_code; ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/bin/autoscan.in0000644000000000000000000004136014004621270013065 00000000000000#! @PERL@ # -*- perl -*- # @configure_input@ # autoscan - Create configure.scan (a preliminary configure.ac) for a package. # Copyright (C) 1994, 1999-2017, 2020-2021 Free Software Foundation, # Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Written by David MacKenzie . 
eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac'
  if 0;

use 5.006;
use strict;
use warnings FATAL => 'all';

BEGIN
{
  my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@';
  unshift @INC, $pkgdatadir;

  # Override SHELL.  On DJGPP SHELL may not be set to a shell
  # that can handle redirection and quote arguments correctly,
  # e.g.: COMMAND.COM.  For DJGPP always use the shell that configure
  # has detected.
  $ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos');
}

use File::Basename;
use File::Find;

use Autom4te::ChannelDefs;
use Autom4te::Configure_ac;
use Autom4te::FileUtils;
use Autom4te::General;
use Autom4te::XFile;

# Source files scanned so far, subdirectories to configure, and the
# macro invocations already printed into configure.scan.
my (@cfiles, @makefiles, @shfiles, @subdirs, %printed);

# The kind of the words we are looking for.
# NOTE: the last kind must be spelled 'library' (was 'librarie'): it has
# to match the kind tags read from lib/autoscan/autoscan.list (validated
# in &init_tables, which rejects any kind not listed here) and the
# 'library' key used by %generic_macro, &used and &output_libraries.
my @kinds = qw (function header identifier program makevar library);

# For each kind, the default macro.
my %generic_macro =
  (
   'function'   => 'AC_CHECK_FUNCS',
   'header'     => 'AC_CHECK_HEADERS',
   'identifier' => 'AC_CHECK_TYPES',
   'program'    => 'AC_CHECK_PROGS',
   'library'    => 'AC_CHECK_LIB'
  );

# Section comment emitted before each kind's checks in configure.scan
# (kinds without an entry get no comment).
my %kind_comment =
  (
   'function'   => 'Checks for library functions.',
   'header'     => 'Checks for header files.',
   'identifier' => 'Checks for typedefs, structures, and compiler characteristics.',
   'program'    => 'Checks for programs.',
  );

# $USED{KIND}{ITEM} is the list of locations where the ITEM (of KIND) was
# used in the user package.
# For instance $USED{function}{alloca} is the list of 'file:line' where
# 'alloca (...)' appears.
my %used = ();

# $MACRO{KIND}{ITEM} is the list of macros to use to test ITEM.
# Initialized from lib/autoscan/*.  E.g., $MACRO{function}{alloca} contains
# the singleton AC_FUNC_ALLOCA.  Some require several checks.
my %macro = ();

# $NEEDED_MACROS{MACRO} is an array of locations requiring MACRO.
# E.g., $NEEDED_MACROS{AC_FUNC_ALLOC} the list of 'file:line' containing
# 'alloca (...)'.
# AC_PREREQ is always wanted, attributed to autoscan itself ($me).
my %needed_macros = ( 'AC_PREREQ' => [$me], );

# Log file handle ($me.log), opened in the main program.
my $log;

# Autoconf and lib files.
my $autom4te = $ENV{'AUTOM4TE'} || '@bindir@/@autom4te-name@'; my $autoconf = "$autom4te --language=autoconf"; my @prepend_include; my @include = ('@pkgdatadir@'); # $help # ----- $help = "Usage: $0 [OPTION]... [SRCDIR] Examine source files in the directory tree rooted at SRCDIR, or the current directory if none is given. Search the source files for common portability problems, check for incompleteness of 'configure.ac', and create a file 'configure.scan' which is a preliminary 'configure.ac' for that package. -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don't remove temporary files Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path Report bugs to . GNU Autoconf home page: . General help using GNU software: . "; # $version # -------- $version = "autoscan (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by David J. MacKenzie and Akim Demaille. "; ## ------------------------ ## ## Command line interface. ## ## ------------------------ ## # parse_args () # ------------- # Process any command line arguments. sub parse_args () { getopt ('I|include=s' => \@include, 'B|prepend-include=s' => \@prepend_include); die "$me: too many arguments Try '$me --help' for more information.\n" if @ARGV > 1; my $srcdir = $ARGV[0] || "."; verb "srcdir = $srcdir"; chdir $srcdir || error "cannot cd to $srcdir: $!"; } # init_tables () # -------------- # Put values in the tables of what to do with each token. sub init_tables () { # The data file format supports only one line of macros per function. 
# If more than that is required for a common portability problem, # a new Autoconf macro should probably be written for that case, # instead of duplicating the code in lots of configure.ac files. my $file = find_file ("autoscan/autoscan.list", reverse (@prepend_include), @include); my $table = new Autom4te::XFile ($file, "<"); my $tables_are_consistent = 1; while ($_ = $table->getline) { # Ignore blank lines and comments. next if /^\s*$/ || /^\s*\#/; # ': ' or... # ': warn: '. if (/^(\S+):\s+(\S+)\s+(\S.*)$/) { my ($kind, $word, $macro) = ($1, $2, $3); error "$file:$.: invalid kind: $_" unless grep { $_ eq $kind } @kinds; push @{$macro{$kind}{$word}}, $macro; } else { error "$file:$.: invalid definition: $_"; } } if ($debug) { foreach my $kind (@kinds) { foreach my $word (sort keys %{$macro{$kind}}) { print "$kind: $word: @{$macro{$kind}{$word}}\n"; } } } } # used ($KIND, $WORD, [$WHERE]) # ----------------------------- # $WORD is used as a $KIND. sub used ($$;$) { my ($kind, $word, $where) = @_; $where ||= "$File::Find::name:$."; if ( # Check for all the libraries. But '-links' is certainly a # 'find' argument, and '-le', a 'test' argument. ($kind eq 'library' && $word !~ /^(e|inks)$/) # Other than libraries are to be checked only if listed in # the Autoscan library files. || defined $macro{$kind}{$word} ) { push (@{$used{$kind}{$word}}, $where); } } ## ----------------------- ## ## Scanning source files. ## ## ----------------------- ## # scan_c_file ($FILE-NAME) # ------------------------ sub scan_c_file ($) { my ($file_name) = @_; push @cfiles, $File::Find::name; # Nonzero if in a multiline comment. my $in_comment = 0; my $file = new Autom4te::XFile ($file_name, "<"); while ($_ = $file->getline) { # Strip out comments. if ($in_comment && s,^.*?\*/,,) { $in_comment = 0; } # The whole line is inside a comment. next if $in_comment; # All on one line. s,/\*.*?\*/,,g; # Starting on this line. if (s,/\*.*$,,) { $in_comment = 1; } # Preprocessor directives. 
if (s/^\s*\#\s*//) { if (/^include\s*<([^>]*)>/) { used ('header', $1); } if (s/^(if|ifdef|ifndef|elif)\s+//) { foreach my $word (split (/\W+/)) { used ('identifier', $word) unless $word eq 'defined' || $word !~ /^[a-zA-Z_]/; } } # Ignore other preprocessor directives. next; } # Remove string and character constants. s,\"[^\"]*\",,g; s,\'[^\']*\',,g; # Tokens in the code. # Maybe we should ignore function definitions (in column 0)? while (s/\b([a-zA-Z_]\w*)\s*\(/ /) { used ('function', $1); } while (s/\b([a-zA-Z_]\w*)\b/ /) { used ('identifier', $1); } } $file->close; } # scan_makefile($MAKEFILE-NAME) # ----------------------------- sub scan_makefile ($) { my ($file_name) = @_; push @makefiles, $File::Find::name; my $file = new Autom4te::XFile ($file_name, "<"); while ($_ = $file->getline) { # Strip out comments. s/#.*//; # Variable assignments. while (s/\b([a-zA-Z_]\w*)\s*=/ /) { used ('makevar', $1); } # Be sure to catch a whole word. For instance 'lex$U.$(OBJEXT)' # is a single token. Otherwise we might believe 'lex' is needed. foreach my $word (split (/\s+/)) { # Libraries. if ($word =~ /^-l([a-zA-Z_]\w*)$/) { used ('library', $1); } # Tokens in the code. # We allow some additional characters, e.g., '+', since # autoscan/programs includes 'c++'. if ($word =~ /^[a-zA-Z_][\w+]*$/) { used ('program', $word); } } } $file->close; } # scan_sh_file($SHELL-SCRIPT-NAME) # -------------------------------- sub scan_sh_file ($) { my ($file_name) = @_; push @shfiles, $File::Find::name; my $file = new Autom4te::XFile ($file_name, "<"); while ($_ = $file->getline) { # Strip out comments and variable references. s/#.*//; s/\$\{[^\}]*}//g; s/@[^@]*@//g; # Tokens in the code. while (s/\b([a-zA-Z_]\w*)\b/ /) { used ('program', $1); } } $file->close; } # scan_file () # ------------ # Called by &find on each file. $_ contains the current file name with # the current directory of the walk through. sub scan_file () { # Wanted only if there is no corresponding FILE.in. 
return if -f "$_.in"; # Save $_ as Find::File requires it to be preserved. local $_ = $_; # Strip a useless leading './'. $File::Find::name =~ s,^\./,,; if ($_ ne '.' and -d $_ and -f "$_/configure.in" || -f "$_/configure.ac" || -f "$_/configure.gnu" || -f "$_/configure") { $File::Find::prune = 1; push @subdirs, $File::Find::name; } if (/\.[chlym](\.in)?$/) { used 'program', 'cc', $File::Find::name; scan_c_file ($_); } elsif (/\.(cc|cpp|cxx|CC|C|hh|hpp|hxx|HH|H|yy|ypp|ll|lpp)(\.in)?$/) { used 'program', 'c++', $File::Find::name; scan_c_file ($_); } elsif ((/^((?:GNUm|M|m)akefile)(\.in)?$/ && ! -f "$1.am") || /^(?:GNUm|M|m)akefile(\.am)?$/) { scan_makefile ($_); } elsif (/\.sh(\.in)?$/) { scan_sh_file ($_); } } # scan_files () # ------------- # Read through the files and collect lists of tokens in them # that might create nonportabilities. sub scan_files () { find (\&scan_file, '.'); if ($verbose) { print "cfiles: @cfiles\n"; print "makefiles: @makefiles\n"; print "shfiles: @shfiles\n"; foreach my $kind (@kinds) { print "\n$kind:\n"; foreach my $word (sort keys %{$used{$kind}}) { print "$word: @{$used{$kind}{$word}}\n"; } } } } ## ----------------------- ## ## Output configure.scan. ## ## ----------------------- ## # output_kind ($FILE, $KIND) # -------------------------- sub output_kind ($$) { my ($file, $kind) = @_; # Lists of words to be checked with the generic macro. my @have; print $file "\n# $kind_comment{$kind}\n" if exists $kind_comment{$kind}; foreach my $word (sort keys %{$used{$kind}}) { # Output the needed macro invocations in configure.scan if not # already printed, and remember these macros are needed. 
foreach my $macro (@{$macro{$kind}{$word}}) { if ($macro =~ /^warn:\s+(.*)/) { my $message = $1; foreach my $location (@{$used{$kind}{$word}}) { warn "$location: warning: $message\n"; } } elsif (exists $generic_macro{$kind} && $macro eq $generic_macro{$kind}) { push (@have, $word); push (@{$needed_macros{"$generic_macro{$kind}([$word])"}}, @{$used{$kind}{$word}}); } else { if (! $printed{$macro}) { print $file "$macro\n"; $printed{$macro} = 1; } push (@{$needed_macros{$macro}}, @{$used{$kind}{$word}}); } } } print $file "$generic_macro{$kind}([" . join(' ', sort(@have)) . "])\n" if @have; } # output_libraries ($FILE) # ------------------------ sub output_libraries ($) { my ($file) = @_; print $file "\n# Checks for libraries.\n"; foreach my $word (sort keys %{$used{'library'}}) { print $file "# FIXME: Replace 'main' with a function in '-l$word':\n"; print $file "AC_CHECK_LIB([$word], [main])\n"; } } # output ($CONFIGURE_SCAN) # ------------------------ # Print a proto configure.ac. sub output ($) { my $configure_scan = shift; my %unique_makefiles; my $file = new Autom4te::XFile ($configure_scan, ">"); print $file ("# -*- Autoconf -*-\n" . "# Process this file with autoconf to produce a configure script.\n" . "\n" . "AC_PREREQ([@VERSION@])\n" . "AC_INIT([FULL-PACKAGE-NAME], [VERSION], [BUG-REPORT-ADDRESS])\n"); if (defined $cfiles[0]) { print $file "AC_CONFIG_SRCDIR([$cfiles[0]])\n"; print $file "AC_CONFIG_HEADERS([config.h])\n"; } output_kind ($file, 'program'); output_kind ($file, 'makevar'); output_libraries ($file); output_kind ($file, 'header'); output_kind ($file, 'identifier'); output_kind ($file, 'function'); print $file "\n"; if (@makefiles) { # Change DIR/Makefile.in to DIR/Makefile. 
foreach my $m (@makefiles) { $m =~ s/\.(?:in|am)$//; $unique_makefiles{$m}++; } print $file ("AC_CONFIG_FILES([", join ("\n ", sort keys %unique_makefiles), "])\n"); } if (@subdirs) { print $file ("AC_CONFIG_SUBDIRS([", join ("\n ", sort @subdirs), "])\n"); } print $file "AC_OUTPUT\n"; $file->close; } ## --------------------------------------- ## ## Checking the accuracy of configure.ac. ## ## --------------------------------------- ## # &check_configure_ac ($CONFIGURE_AC) # ----------------------------------- # Use autoconf to check if all the suggested macros are included # in CONFIGURE_AC. sub check_configure_ac ($) { my ($configure_ac) = @_; # Find what needed macros are invoked in CONFIGURE_AC. # I'd be very happy if someone could explain to me why sort (uniq ...) # doesn't work properly: I need 'uniq (sort ...)'. --akim my $trace_option = join (' --trace=', '', uniq (sort (map { s/\(.*//; $_ } keys %needed_macros))); # Suppress all warnings from the subsidiary autoconf invocation. local $ENV{WARNINGS} = 'none'; verb "running: WARNINGS=none $autoconf $trace_option $configure_ac"; my $traces = new Autom4te::XFile "$autoconf $trace_option $configure_ac |"; while ($_ = $traces->getline) { chomp; my ($file, $line, $macro, @args) = split (/:/, $_); if ($macro =~ /^AC_CHECK_(HEADER|FUNC|TYPE|MEMBER)S$/) { # To be rigorous, we should distinguish between space and comma # separated macros. But there is no point. foreach my $word (split (/\s|,/, $args[0])) { # AC_CHECK_MEMBERS wants 'struct' or 'union'. if ($macro eq "AC_CHECK_MEMBERS" && $word =~ /^stat.st_/) { $word = "struct " . $word; } delete $needed_macros{"$macro([$word])"}; } } else { delete $needed_macros{$macro}; } } $traces->close; # Report the missing macros. foreach my $macro (sort keys %needed_macros) { warn ("$configure_ac: warning: missing $macro wanted by: " . (${$needed_macros{$macro}}[0]) . 
"\n"); print $log "$me: warning: missing $macro wanted by: \n"; foreach my $need (@{$needed_macros{$macro}}) { print $log "\t$need\n"; } } } ## -------------- ## ## Main program. ## ## -------------- ## parse_args; $log = new Autom4te::XFile ("$me.log", ">"); $autoconf .= " --debug" if $debug; $autoconf .= " --verbose" if $verbose; $autoconf .= join (' --include=', '', map { shell_quote ($_) } @include); $autoconf .= join (' --prepend-include=', '', map { shell_quote ($_) } @prepend_include); my $configure_ac = find_configure_ac; init_tables; scan_files; output ('configure.scan'); if (-f $configure_ac) { check_configure_ac ($configure_ac); } # This close is really needed. For some reason, probably best named # a bug, it seems that the dtor of $LOG is not called automatically # at END. It results in a truncated file. $log->close; exit 0; ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/bin/autoupdate.in0000644000000000000000000010225214004621270013421 00000000000000#! @PERL@ # -*- perl -*- # @configure_input@ # autoupdate - modernize an Autoconf file. # Copyright (C) 1994, 1999-2017, 2020-2021 Free Software Foundation, # Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. 
# This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Originally written by David MacKenzie . # Rewritten by Akim Demaille . eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac' if 0; use 5.006; use strict; use warnings FATAL => 'all'; BEGIN { my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@'; unshift @INC, $pkgdatadir; # Override SHELL. On DJGPP SHELL may not be set to a shell # that can handle redirection and quote arguments correctly, # e.g.: COMMAND.COM. For DJGPP always use the shell that configure # has detected. $ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos'); } use File::Basename; use Autom4te::ChannelDefs; use Autom4te::Channels; use Autom4te::Configure_ac; use Autom4te::FileUtils; use Autom4te::General; use Autom4te::XFile; # Lib files. my $autom4te = $ENV{'AUTOM4TE'} || '@bindir@/@autom4te-name@'; my $autoconf = "$autom4te --language=autoconf"; # We need to find m4sugar. my @prepend_include; my @include = ('@pkgdatadir@'); my $force = 0; # m4. my $m4 = $ENV{"M4"} || '@M4@'; # $HELP # ----- $help = "Usage: $0 [OPTION]... [TEMPLATE-FILE]... Update each TEMPLATE-FILE if given, or 'configure.ac' if present, or else 'configure.in', to the syntax of the current version of Autoconf. The original files are backed up. Operation modes: -h, --help print this help, then exit -V, --version print version number, then exit -v, --verbose verbosely report processing -d, --debug don't remove temporary files -f, --force consider all files obsolete Library directories: -B, --prepend-include=DIR prepend directory DIR to search path -I, --include=DIR append directory DIR to search path Report bugs to . GNU Autoconf home page: . 
General help using GNU software: . "; # $VERSION # -------- $version = "autoupdate (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by David J. MacKenzie and Akim Demaille. "; ## ---------- ## ## Routines. ## ## ---------- ## # parse_args () # ------------- # Process any command line arguments. sub parse_args () { my $srcdir; getopt ('I|include=s' => \@include, 'B|prepend-include=s' => \@prepend_include, 'f|force' => \$force); if (! @ARGV) { my $configure_ac = require_configure_ac; push @ARGV, $configure_ac; } } # ----------------- # # Autoconf macros. # # ----------------- # my (%ac_macros, %au_macros, %m4_builtins); # HANDLE_AUTOCONF_MACROS () # ------------------------- # @M4_BUILTINS -- M4 builtins and a useful comment. sub handle_autoconf_macros () { # Get the builtins. xsystem ("echo dumpdef | $m4 2>" . shell_quote ("$tmp/m4.defs") . " >/dev/null"); my $m4_defs = new Autom4te::XFile ("$tmp/m4.defs", "<"); while ($_ = $m4_defs->getline) { $m4_builtins{$1} = 1 if /^(\w+):/; } $m4_defs->close; my $macros = new Autom4te::XFile ("$autoconf" . " --trace AU_DEFINE:'AU:\$f:\$1'" . " --trace define:'AC:\$f:\$1'" . " --melt /dev/null |"); while ($_ = $macros->getline) { chomp; my ($domain, $file, $macro) = /^(AC|AU):(.*):([^:]*)$/ or next; if ($domain eq "AU") { $au_macros{$macro} = 1; } elsif ($file =~ /(^|\/)m4sugar\/(m4sugar|version)\.m4$/) { # Add the m4sugar macros to m4_builtins. $m4_builtins{$macro} = 1; } else { # Autoconf, aclocal, and m4sh macros. $ac_macros{$macro} = 1; } } $macros->close; # Don't keep AU macros in @AC_MACROS. delete $ac_macros{$_} foreach (keys %au_macros); # Don't keep M4sugar macros which are redefined by Autoconf, # such as 'builtin', 'changequote' etc. See autoconf/autoconf.m4. 
delete $ac_macros{$_} foreach (keys %m4_builtins); error "no current Autoconf macros found" unless keys %ac_macros; error "no obsolete Autoconf macros found" unless keys %au_macros; if ($debug) { print STDERR "Current Autoconf macros:\n"; print STDERR join (' ', sort keys %ac_macros) . "\n\n"; print STDERR "Obsolete Autoconf macros:\n"; print STDERR join (' ', sort keys %au_macros) . "\n\n"; } # ac.m4 -- autoquoting definitions of the AC macros (M4sugar excluded). # unac.m4 -- undefine the AC macros. my $ac_m4 = new Autom4te::XFile ("$tmp/ac.m4", ">"); print $ac_m4 "# ac.m4 -- autoquoting definitions of the AC macros.\n"; my $unac_m4 = new Autom4te::XFile ("$tmp/unac.m4", ">"); print $unac_m4 "# unac.m4 -- undefine the AC macros.\n"; foreach (sort keys %ac_macros) { print $ac_m4 "_au_m4_define([$_], [m4_if(\$#, 0, [[\$0]], [[\$0(\$\@)]])])\n"; print $unac_m4 "_au_m4_undefine([$_])\n"; } # m4save.m4 -- save the m4 builtins. # unm4.m4 -- disable the m4 builtins. # m4.m4 -- enable the m4 builtins. my $m4save_m4 = new Autom4te::XFile ("$tmp/m4save.m4", ">"); print $m4save_m4 "# m4save.m4 -- save the m4 builtins.\n"; my $unm4_m4 = new Autom4te::XFile ("$tmp/unm4.m4", ">"); print $unm4_m4 "# unm4.m4 -- disable the m4 builtins.\n"; my $m4_m4 = new Autom4te::XFile ("$tmp/m4.m4", ">"); print $m4_m4 "# m4.m4 -- enable the m4 builtins.\n"; foreach (sort keys %m4_builtins) { print $m4save_m4 "_au__save([$_])\n"; print $unm4_m4 "_au__undefine([$_])\n"; print $m4_m4 "_au__restore([$_])\n"; } } ## -------------- ## ## Main program. ## ## -------------- ## parse_args; $autoconf .= " --debug" if $debug; $autoconf .= " --force" if $force; $autoconf .= " --verbose" if $verbose; $autoconf .= join (' --include=', '', map { shell_quote ($_) } @include); $autoconf .= join (' --prepend-include=', '', map { shell_quote ($_) } @prepend_include); # Disable all warnings from autoconf invocations. 
# In particular we do not want warnings about obsolete constructs, # which are on by default as of autoconf 2.70. $ENV{'WARNINGS'} = 'none'; mktmpdir ('au'); handle_autoconf_macros; # $au_changequote -- enable the quote '[', ']' right before any AU macro. my $au_changequote = 's/\b(' . join ('|', keys %au_macros) . ')\b/_au_m4_changequote([,])$1/g'; # au.m4 -- definitions the AU macros. xsystem ("$autoconf --trace AU_DEFINE:'_au_defun(\@<:\@\$1\@:>\@, \@<:\@\$2\@:>\@)' --melt /dev/null " . ">" . shell_quote ("$tmp/au.m4")); ## ------------------- ## ## Process the files. ## ## ------------------- ## foreach my $file (@ARGV) { # We need an actual file. if ($file eq '-') { $file = "$tmp/stdin"; system "cat >" . shell_quote ($file); } elsif (! -r "$file") { die "$me: $file: No such file or directory"; } # input.m4 -- m4 program to produce the updated file. # Load the values, the dispatcher, neutralize m4, and the prepared # input file. my $input_m4 = <<\EOF; divert(-1) -*- Autoconf -*- changequote([,]) # Define our special macros: define([_au__defn], defn([defn])) define([_au__divert], defn([divert])) define([_au__ifdef], defn([ifdef])) define([_au__include], defn([include])) define([_au___undefine], defn([undefine])) define([_au__undefine], [_au__ifdef([$1], [_au___undefine([$1])])]) define([_au__save], [m4_ifdef([$1], [m4_define([_au_$1], _m4_defn([$1]))])]) define([_au__restore], [_au_m4_ifdef([_au_$1], [_au_m4_define([$1], _au__defn([_au_$1]))])]) # Set up m4sugar. include(m4sugar/m4sugar.m4) # Redefine __file__ to make warnings nicer; $file is replaced below. m4_define([__file__], [$file]) # Redefine m4_location to fix the line number. m4_define([m4_location], [__file__:m4_eval(__line__ - _au__first_line)]) # Move all the builtins into the '_au_' pseudo namespace m4_include([m4save.m4]) # _au_defun(NAME, BODY) # --------------------- # Define NAME to BODY, plus AU activation/deactivation. 
_au_m4_define([_au_defun], [_au_m4_define([$1], [_au_enable()dnl $2[]dnl _au_disable()])]) # Import the definition of the obsolete macros. _au__include([au.m4]) ## ------------------------ ## ## _au_enable/_au_disable. ## ## ------------------------ ## # They work by pair: each time an AU macro is activated, it runs # _au_enable, and at its end its runs _au_disable (see _au_defun # above). AU macros might use AU macros, which should # enable/disable only for the outer AU macros. # # '_au_enabled' is used to this end, determining whether we really # enable/disable. # __au_enable # ----------- # Reenable the builtins, m4sugar, and the autoquoting AC macros. _au_m4_define([__au_enable], [_au__divert(-1) # Enable special characters. _au_m4_changecom([#]) _au__include([m4.m4]) _au__include([ac.m4]) _au__divert(0)]) # _au_enable # ---------- # Called at the beginning of all the obsolete macros. If this is the # outermost level, call __au_enable. _au_m4_define([_au_enable], [_au_m4_ifdef([_au_enabled], [], [__au_enable()])_au_dnl _au_m4_pushdef([_au_enabled])]) # __au_disable # ------------ # Disable the AC autoquoting macros, m4sugar, and m4. _au_m4_define([__au_disable], [_au__divert(-1) _au__include([unac.m4]) _au__include([unm4.m4]) # Disable special characters. _au_m4_changequote() _au_m4_changecom() _au__divert(0)]) # _au_disable # ----------- # Called at the end of all the obsolete macros. If we are at the # outermost level, call __au_disable. _au_m4_define([_au_disable], [_au_m4_popdef([_au_enabled])_au_dnl _au_m4_ifdef([_au_enabled], [], [__au_disable()])]) ## ------------------------------- ## ## Disable, and process the file. ## ## ------------------------------- ## # The AC autoquoting macros are not loaded yet, hence invoking # '_au_disable' would be wrong. _au__include([unm4.m4]) # Disable special characters, and set the first line number. 
_au_m4_changequote() _au_m4_changecom() _au_m4_define(_au__first_line, _au___line__)_au__divert(0)_au_dnl EOF $input_m4 =~ s/^ //mg; $input_m4 =~ s/\$file/$file/g; # prepared input -- input, but reenables the quote before each AU macro. open INPUT_M4, ">", "$tmp/input.m4" or error "cannot open: $!"; open FILE, "<", $file or error "cannot open: $!"; print INPUT_M4 "$input_m4"; while () { eval $au_changequote; print INPUT_M4; } close FILE or error "cannot close $file: $!"; close INPUT_M4 or error "cannot close $tmp/input.m4: $!"; # Now ask m4 to perform the update. xsystem ("$m4 --include=" . shell_quote ($tmp) . join (' --include=', '', map { shell_quote ($_) } reverse (@prepend_include)) . join (' --include=', '', map { shell_quote ($_) } @include) . " " . shell_quote ("$tmp/input.m4") . " > " . shell_quote ("$tmp/updated")); update_file ("$tmp/updated", "$file" eq "$tmp/stdin" ? '-' : "$file"); } exit 0; # ## ---------------------------- ## # ## How 'autoupdate' functions. ## # ## ---------------------------- ## # # The task of 'autoupdate' is not trivial: the biggest difficulty being # that you must limit the changes to the parts that really need to be # updated. Finding a satisfying implementation proved to be quite hard, # as this is the fifth implementation of 'autoupdate'. # # Below, we will use a simple example of an obsolete macro: # # AU_DEFUN([OLD], [NEW([$1, $2], m4_eval([$1 + $2]))]) # AC_DEFUN([NEW], [echo "sum($1) = $2"]) # # the input file contains # # dnl The Unbelievable Truth # OLD(1, 2) # NEW([0, 0], [0]) # # Of course the expected output is # # dnl The Unbelievable Truth # NEW([1, 2], [3]) # NEW([0, 0], [0]) # # # # First implementation: sed # # ========================= # # The first implementation was only able to change the name of obsolete # macros. # # The file 'acoldnames.m4' defined the old names based on the new names. 
# It was simple then to produce a sed script such as: # # s/OLD/NEW/g # # Updating merely consisted in running this script on the file to # update. # # This scheme suffers from an obvious limitation: that 'autoupdate' was # unable to cope with new macros that just swap some of its arguments # compared to the old macro. Fortunately, that was enough to upgrade # from Autoconf 1 to Autoconf 2. (But I have no idea whether the # changes in Autoconf 2 were precisely limited by this constraint.) # # # # Second implementation: hooks # # ============================ # # The version 2.15 of Autoconf brought a vast number of changes compared # to 2.13, so a solution was needed. One could think of extending the # 'sed' scripts with specialized code for complex macros. However, this # approach is of course full of flaws: # # a. the Autoconf maintainers have to write these snippets, which we # just don't want to, # # b. I really don't think you'll ever manage to handle the quoting of # m4 with a sed script. # # To satisfy a., let's remark that the code which implements the old # features in term of the new feature is exactly the code which should # replace the old code. # # To answer point b, as usual in the history of Autoconf, the answer, at # least on the paper, is simple: m4 is the best tool to parse m4, so # let's use m4. # # Therefore the specification is: # # I want to be able to tell Autoconf, well, m4, that the macro I # am currently defining is an obsolete macro (so that the user is # warned), and its code is the code to use when running autoconf, # but that the very same code has to be used when running # autoupdate. To summarize, the interface I want is # 'AU_DEFUN(OLD-NAME, NEW-CODE)'. # # # Now for the technical details. # # When running autoconf, except for the warning, AU_DEFUN is basically # AC_DEFUN. # # When running autoupdate, we want *only* OLD-NAMEs to be expanded. # This obviously means that acgeneral.m4 and acspecific.m4 must not be # loaded. 
Nonetheless, because we want to use a rich set of m4 # features, m4sugar.m4 is needed. Please note that the fact that # Autoconf's macros are not loaded is positive on two points: # # - we do get an updated 'configure.ac', not a 'configure'! # # - the old macros are replaced by *calls* to the new-macros, not the # body of the new macros, since their body is not defined!!! # (Whoa, that's really beautiful!). # # Additionally we need to disable the quotes when reading the input for # two reasons: first because otherwise 'm4' will swallow the quotes of # other macros: # # NEW([1, 2], 3) # => NEW(1, 2, 3) # # and second, because we want to update the macro calls which are # quoted, i.e., we want # # FOO([OLD(1, 2)]) # => FOO([NEW([1, 2], [3])]) # # If we don't disable the quotes, only the macros called at the top # level would be updated. # # So, let's disable the quotes. # # Well, not quite: m4sugar.m4 still needs to use quotes for some macros. # Well, in this case, when running in autoupdate code, each macro first # reestablishes the quotes, expands itself, and disables the quotes. # # Thinking a bit more, you realize that in fact, people may use 'define', # 'ifelse' etc. in their files, and you certainly don't want to process # them. Another example is 'dnl': you don't want to remove the # comments. You then realize you don't want exactly to import m4sugar: # you want to specify when it is enabled (macros active), and disabled. # m4sugar provides m4_disable/m4_enable to this end. # # You're getting close to it. Now remains one task: how to handle # twofold definitions? # # Remember that the same AU_DEFUN must be understood in two different # ways, the AC way, and the AU way. # # One first solution is to check whether acgeneral.m4 was loaded. But # that's definitely not cute. Another is simply to install 'hooks', # that is to say, to keep in some place m4 knows, late 'define' to be # triggered *only* in AU mode. 
#
# You first think of designing AU_DEFUN like this:
#
#  1. AC_DEFUN(OLD-NAME,
#     [Warn the user OLD-NAME is obsolete.
#      NEW-CODE])
#
#  2. Store for late AU binding([define(OLD_NAME,
#     [Reestablish the quotes.
#      NEW-CODE
#      Disable the quotes.])])
#
# but this will not work: NEW-CODE probably uses $1, $2 etc. and these
# guys will be replaced with the argument of 'Store for late AU binding'
# when you call it.
#
# I don't think there is a means to avoid this using this technology
# (remember that $1 etc. are *always* expanded in m4).  You may also try
# to replace them with $[1] to preserve them for a later evaluation, but
# if 'Store for late AU binding' is properly written, it will remain
# quoted till the end...
#
# You have to change technology.  Since the problem is that '$1'
# etc. should be 'consumed' right away, one solution is to define now a
# second macro, 'AU_OLD-NAME', and to install a hook that binds OLD-NAME
# to AU_OLD-NAME.  Then, autoupdate.m4 just needs to run the hooks.  By
# the way, the same method was used in autoheader.
#
#
#
# Third implementation: m4 namespaces by m4sugar
# ==============================================
#
# Actually, this implementation was just a cleanup of the previous
# implementation: instead of defining hooks by hand, m4sugar was equipped
# with 'namespaces'.  What are they?
#
# Sometimes we want to disable some *set* of macros, and restore them
# later.  We provide support for this via namespaces.
#
# There are basically three characters playing this scene: defining a
# macro in a namespace, disabling a namespace, and restoring a namespace
# (i.e., all the definitions it holds).
#
# Technically, to define a MACRO in NAMESPACE means to define the macro
# named 'NAMESPACE::MACRO' to the VALUE.  At the same time, we append
# 'undefine(NAME)' in the macro named 'm4_disable(NAMESPACE)', and
# similarly a binding of NAME to the value of 'NAMESPACE::MACRO' in
# 'm4_enable(NAMESPACE)'.
# These mechanisms allow us to bind the macros of NAMESPACE and to
# unbind them at will.
#
# Of course this implementation is really inefficient: m4 has to grow
# strings which can become quickly huge, which slows it significantly.
#
# In particular one should avoid as much as possible using 'define' for
# temporaries.  Now that 'define' has quite a complex meaning, it is an
# expensive operation that should be limited to macros.  Use
# 'm4_define' for temporaries.
#
# Private copies of the macros we used in entering / exiting the m4sugar
# namespace.  It is much more convenient than fighting with the renamed
# version of define etc.
#
#
#
# Those two implementations suffered from serious problems:
#
# - namespaces were really expensive, and incurred a major performance
#   loss on 'autoconf' itself, not only 'autoupdate'.  One solution
#   would have been to limit the use of namespaces to 'autoupdate', but
#   that's again some complications on m4sugar, which really doesn't need
#   this.  So we wanted to get rid of the namespaces.
#
# - since the quotes were disabled, autoupdate was sometimes making
#   wrong guesses, for instance on:
#
#       foo([1, 2])
#
#   m4 saw 2 arguments: '[1' and '2]'.  A simple solution, somewhat
#   fragile, is to reestablish the quotes right before all the obsolete
#   macros, i.e., to use sed so that the previous text becomes
#
#       changequote([, ])foo([1, 2])
#
#   To this end, one wants to trace the definition of obsolete macros.
#
# It was there that the limitations of the namespace approach became
# painful: because it was a complex machinery playing a lot with the
# builtins of m4 (hence, quite fragile), tracing was almost impossible.
#
#
# So this approach was dropped.
#
#
#
# The fourth implementation: two steps
# ====================================
#
# If you drop the use of namespaces, you no longer can compute the
# updated value, and replace the old call with it simultaneously.
# # Obviously you will use m4 to compute the updated values, but you may # use some other tool to achieve the replacement. Personally, I trust # nobody but m4 to parse m4, so below, m4 will perform the two tasks. # # How can m4 be used to replace *some* macros calls with newer values. # Well, that's dead simple: m4 should learn the definitions of obsolete # macros, forget its builtins, disable the quotes, and then run on the # input file, which amounts to doing this: # # divert(-1)dnl # changequote([, ]) # define([OLD], [NEW([$1, $2], m4_eval([$1 + $2]))changequote()]) # undefine([dnl]) # undefine([m4_eval]) # # Some more undefines... # changequote() # divert(0)dnl # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # NEW([0, 0], # 0) # # which will result in # # dnl The Unbelievable Truth # NEW(1, 2, m4_eval(1 + 2)) # NEW([0, 0], # 0) # # Grpmh. Two problems. A minor problem: it would have been much better # to have the 'm4_eval' computed, and a major problem: you lost the # quotation in the result. # # Let's address the big problem first. One solution is to define any # modern macro to rewrite its calls with the proper quotation, thanks to # '$@'. Again, tracing the 'define's makes it possible to know which # are these macros, so you input is: # # divert(-1)dnl # changequote([, ]) # define([OLD], [NEW([$1, $2], m4_eval([$1 + $2]))changequote()]) # define([NEW], [[NEW($@)]changequote()]) # undefine([dnl]) # undefine([m4_eval]) # # Some more undefines... # changequote() # divert(0)dnl # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # changequote([, ])NEW([0, 0], # 0) # # which results in # # dnl The Unbelievable Truth # NEW([1, 2],[m4_eval(1 + 2)]) # NEW([0, 0],[0]) # # Our problem is solved, i.e., the first call to 'NEW' is properly # quoted, but introduced another problem: we changed the layout of the # second calls, which can be a drama in the case of huge macro calls # (think of 'AC_TRY_RUN' for instance). 
This example didn't show it, # but we also introduced parens to macros which did not have some: # # AC_INIT # => AC_INIT() # # No big deal for the semantics (unless the macro depends upon $#, which # is bad), but the users would not be happy. # # Additionally, we introduced quotes that were not there before, which is # OK in most cases, but could change the semantics of the file. # # Cruel dilemma: we do want the auto-quoting definition of 'NEW' when # evaluating 'OLD', but we don't when we evaluate the second 'NEW'. # Back to namespaces? # # No. # # # # Second step: replacement # # ------------------------ # # No, as announced above, we will work in two steps: in a first step we # compute the updated values, and in a second step we replace them. Our # goal is something like this: # # divert(-1)dnl # changequote([, ]) # define([OLD], [NEW([1, 2], [3])changequote()]) # undefine([dnl]) # undefine([m4_eval]) # # Some more undefines... # changequote() # divert(0)dnl # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # NEW([0, 0], # 0) # # i.e., the new value of 'OLD' is precomputed using the auto-quoting # definition of 'NEW' and the m4 builtins. We'll see how afterwards, # let's finish with the replacement. # # Of course the solution above is wrong: if there were other calls to # 'OLD' with different values, we would smash them to the same value. # But it is quite easy to generalize the scheme above: # # divert(-1)dnl # changequote([, ]) # define([OLD([1],[2])], [NEW([1, 2], [3])]) # define([OLD], [defn([OLD($@)])changequote()]) # undefine([dnl]) # undefine([m4_eval]) # # Some more undefines... # changequote() # divert(0)dnl # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # NEW([0, 0], # 0) # # i.e., for each call to obsolete macros, we build an array 'call => # value', and use a macro to dispatch these values. 
This results in: # # dnl The Unbelievable Truth # NEW([1, 2], [3]) # NEW([0, 0], # 0) # # In French, we say 'Youpi !', which you might roughly translate as # 'Yippee!'. # # # # First step: computation # # ----------------------- # # Let's study the anatomy of the file, and name its sections: # # prologue # divert(-1)dnl # changequote([, ]) # values # define([OLD([1],[2])], [NEW([1, 2], [3])]) # dispatcher # define([OLD], [defn([OLD($@)])changequote()]) # disabler # undefine([dnl]) # undefine([m4_eval]) # # Some more undefines... # changequote() # divert(0)dnl # input # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # NEW([0, 0], # 0) # # # # Computing the 'values' section # # .............................. # # First we need to get the list of all the AU macro uses. To this end, # first get the list of all the AU macros names by tracing 'AU_DEFUN' in # the initialization of autoconf. This list is computed in the file # 'au.txt' below. # # Then use this list to trace all the AU macro uses in the input. The # goal is obtain in the case of our example: # # [define([OLD([1],[2])],]@<<@OLD([1],[2])@>>@[)] # # This is the file 'values.in' below. # # We want to evaluate this with only the builtins (in fact m4sugar), the # auto-quoting definitions of the new macros ('new.m4'), and the # definition of the old macros ('old.m4'). Computing these last two # files is easy: it's just a matter of using the right '--trace' option. # # So the content of 'values.in' is: # # include($autoconf_dir/m4sugar.m4) # m4_include(new.m4) # m4_include(old.m4) # divert(0)dnl # [define([OLD([1],[2])],]@<<@OLD([1],[2])@>>@[)] # # We run m4 on it, which yields: # # define([OLD([1],[2])],@<<@NEW([1, 2], [3])@>>@) # # Transform '@<<@' and '@>>@' into quotes and we get # # define([OLD([1],[2])],[NEW([1, 2], [3])]) # # This is 'values.m4'. # # # # Computing the 'dispatcher' section # # .................................. # # The 'prologue', and the 'disabler' are simple and need no commenting. 
# # To compute the 'dispatcher' ('dispatch.m4'), again, it is a simple # matter of using the right '--trace'. # # Finally, the input is not exactly the input file, rather it is the # input file with the added 'changequote'. To this end, we build # 'quote.sed'. # # # # Putting it all together # # ....................... # # We build the file 'input.m4' which contains: # # divert(-1)dnl # changequote([, ]) # include(values.m4) # include(dispatch.m4) # undefine([dnl]) # undefine([eval]) # # Some more undefines... # changequote() # divert(0)dnl # dnl The Unbelievable Truth # changequote([, ])OLD(1, 2) # NEW([0, 0], # 0) # # And we just run m4 on it. Et voila`, Monsieur ! Mais oui, mais oui. # # Well, there are a few additional technicalities. For instance, we # rely on 'changequote', 'ifelse' and 'defn', but we don't want to # interpret the changequotes of the user, so we simply use another name: # '_au_changequote' etc. # # # # Failure of the fourth approach # # ------------------------------ # # This approach is heavily based on traces, but then there is an obvious # problem: non expanded code will never be seen. In particular, the body # of a 'define' definition is not seen, so on the input # # define([idem], [OLD(0, [$1])]) # # autoupdate would never see the 'OLD', and wouldn't have updated it. # Worse yet, if 'idem(0)' was used later, then autoupdate sees that # 'OLD' is used, computes the result for 'OLD(0, 0)' and sets up a # dispatcher for 'OLD'. Since there was no computed value for 'OLD(0, # [$1])', the dispatcher would have replaced with... nothing, leading # to # # define([idem], []) # # With some more thinking, you see that the two step approach is wrong, # the namespace approach was much saner. # # But you learned a lot, in particular you realized that using traces # can make it possible to simulate namespaces! 
# # # # # The fifth implementation: m4 namespaces by files # # ================================================ # # The fourth implementation demonstrated something unsurprising: you # cannot precompute, i.e., the namespace approach was the right one. # Still, we no longer want them, they're too expensive. Let's have a # look at the way it worked. # # When updating # # dnl The Unbelievable Truth # OLD(1, 2) # NEW([0, 0], [0]) # # you evaluate 'input.m4': # # divert(-1) # changequote([, ]) # define([OLD], # [m4_enable()NEW([$1, $2], m4_eval([$1 + $2]))m4_disable()]) # ... # m4_disable() # dnl The Unbelievable Truth # OLD(1, 2) # NEW([0, 0], [0]) # # where 'm4_disable' undefines the m4 and m4sugar, and disables the quotes # and comments: # # define([m4_disable], # [undefine([__file__]) # ... # changecom(#) # changequote()]) # # 'm4_enable' does the converse: reestablish quotes and comments # --easy--, reestablish m4sugar --easy: just load 'm4sugar.m4' again-- and # reenable the builtins. This later task requires that you first save # the builtins. And BTW, the definition above of 'm4_disable' cannot # work: you undefined 'changequote' before using it! So you need to use # your privates copies of the builtins. Let's introduce three files for # this: # # 'm4save.m4' # moves the m4 builtins into the '_au_' pseudo namespace, # 'unm4.m4' # undefines the builtins, # 'm4.m4' # restores them. # # So 'input.m4' is: # # divert(-1) # changequote([, ]) # # include([m4save.m4]) # # # Import AU. # define([OLD], # [m4_enable()NEW([$1, $2], m4_eval([$1 + $2]))m4_disable()]) # # define([_au_enable], # [_au_changecom([#]) # _au_include([m4.m4]) # _au_include(m4sugar.m4)]) # # define([_au_disable], # [# Disable m4sugar. # # Disable the m4 builtins. # _au_include([unm4.m4]) # # 1. Disable special characters. 
# _au_changequote() # _au_changecom()]) # # m4_disable() # dnl The Unbelievable Truth # OLD(1, 2) # NEW([0, 0], [0]) # # Based on what we learned in the fourth implementation we know that we # have to enable the quotes *before* any AU macro, and we know we need # to build autoquoting versions of the AC macros. But the autoquoting # AC definitions must be disabled in the rest of the file, and enabled # inside AU macros. # # Using 'autoconf --trace' it is easy to build the files # # 'ac.m4' # define the autoquoting AC fake macros # 'disable.m4' # undefine the m4sugar and AC autoquoting macros. # 'au.m4' # definitions of the AU macros (such as 'OLD' above). # # Now, 'input.m4' is: # # divert(-1) # changequote([, ]) # # include([m4save.m4]) # # Import AU. # include([au.m4]) # # define([_au_enable], # [_au_changecom([#]) # _au_include([m4.m4]) # _au_include(m4sugar.m4) # _au_include(ac.m4)]) # # define([_au_disable], # [_au_include([disable.m4]) # _au_include([unm4.m4]) # # 1. Disable special characters. # _au_changequote() # _au_changecom()]) # # m4_disable() # dnl The Unbelievable Truth # _au_changequote([, ])OLD(1, 2) # NEW([0, 0], [0]) # # Finally, version V is ready. # # Well... almost. # # There is a slight problem that remains: if an AU macro OUTER includes # an AU macro INNER, then _au_enable will be run when entering OUTER # and when entering INNER (not good, but not too bad yet). But when # getting out of INNER, _au_disable will disable everything while we # were still in OUTER. Badaboom. # # Therefore _au_enable and _au_disable have to be written to work by # pairs: each _au_enable pushdef's _au_enabled, and each _au_disable # popdef's _au_enabled. And of course _au_enable and _au_disable are # effective when _au_enabled is *not* defined. # # Finally, version V' is ready. And there is much rejoicing. (And I # have free time again. I think. Yeah, right.) ### Setup "GNU" style for perl-mode and cperl-mode. 
## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/bin/ifnames.in0000644000000000000000000001005214004621270012664 00000000000000#! @PERL@ # -*- perl -*- # @configure_input@ eval 'case $# in 0) exec @PERL@ -S "$0";; *) exec @PERL@ -S "$0" "$@";; esac' if 0; # ifnames - print the identifiers used in C preprocessor conditionals # Copyright (C) 1994-1995, 1999-2003, 2005-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Reads from stdin if no files are given. # Writes to stdout. # Written by David MacKenzie # and Paul Eggert . use 5.006; use strict; use warnings FATAL => 'all'; BEGIN { my $pkgdatadir = $ENV{'autom4te_perllibdir'} || '@pkgdatadir@'; unshift @INC, $pkgdatadir; # Override SHELL. On DJGPP SHELL may not be set to a shell # that can handle redirection and quote arguments correctly, # e.g.: COMMAND.COM. For DJGPP always use the shell that configure # has detected. 
$ENV{'SHELL'} = '@SHELL@' if ($^O eq 'dos'); } use Autom4te::General; use Autom4te::XFile; use Autom4te::FileUtils; # $HELP # ----- $help = "Usage: $0 [OPTION]... [FILE]... Scan all of the C source FILES (or the standard input, if none are given) and write to the standard output a sorted list of all the identifiers that appear in those files in '#if', '#elif', '#ifdef', or '#ifndef' directives. Print each identifier on a line, followed by a space-separated list of the files in which that identifier occurs. -h, --help print this help, then exit -V, --version print version number, then exit Report bugs to . GNU Autoconf home page: . General help using GNU software: . "; # $VERSION # -------- $version = "ifnames (@PACKAGE_NAME@) @VERSION@ Copyright (C) @RELEASE_YEAR@ Free Software Foundation, Inc. License GPLv3+/Autoconf: GNU GPL version 3 or later , This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. Written by David J. MacKenzie and Paul Eggert. "; # &parse_args () # -------------- # Process any command line arguments. sub parse_args () { getopt (); } # %OCCURRENCE # ----------- my %occurrence; # &scan_file ($FILE-NAME) # ----------------------- sub scan_file ($) { my ($file_name) = @_; my $file = new Autom4te::XFile ($file_name, "<"); while ($_ = $file->getline) { # Continuation lines. $_ .= $file->getline while (s/\\$//); # Preprocessor directives. if (s/^\s*\#\s*(if|ifdef|ifndef|elif)\s+//) { # Remove comments. Not perfect, but close enough. s(/\*.*?\*/)(); s(/\*.*)(); s(//.*)(); foreach my $word (split (/\W+/)) { next if $word eq 'defined' || $word !~ /^[a-zA-Z_]/; $occurrence{$word}{$file_name} = 1; } } } } ## ------ ## ## Main. ## ## ------ ## parse_args(); foreach (@ARGV) { scan_file ($_); } foreach (sort keys %occurrence) { print "$_ ", join (' ', sort keys %{$occurrence{$_}}), "\n"; } ### Setup "GNU" style for perl-mode and cperl-mode. 
## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/build-aux/0000755000000000000000000000000014004625653012127 500000000000000autoconf-2.71/build-aux/config.guess0000755000000000000000000014044614004621310014362 00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2021 Free Software Foundation, Inc. timestamp='2021-01-25' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: # https://git.savannah.gnu.org/cgit/config.git/plain/config.guess # # Please send patches to . 
me=$(echo "$0" | sed -e 's,.*/,,') usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. 
tmp= # shellcheck disable=SC2172 trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 set_cc_for_build() { # prevent multiple calls if $tmp is already set test "$tmp" && return 0 : "${TMPDIR=/tmp}" # shellcheck disable=SC2039 { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } dummy=$tmp/dummy case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in ,,) echo "int x;" > "$dummy.c" for driver in cc gcc c89 c99 ; do if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then CC_FOR_BUILD="$driver" break fi done if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac } # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if test -f /.attbin/uname ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown case "$UNAME_SYSTEM" in Linux|GNU|GNU/*) LIBC=unknown set_cc_for_build cat <<-EOF > "$dummy.c" #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #elif defined(__GLIBC__) LIBC=gnu #else #include /* First heuristic to detect musl libc. */ #ifdef __DEFINED_va_list LIBC=musl #endif #endif EOF eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')" # Second heuristic to detect musl libc. 
if [ "$LIBC" = unknown ] && command -v ldd >/dev/null && ldd --version 2>&1 | grep -q ^musl; then LIBC=musl fi # If the system lacks a compiler, then just pick glibc. # We could probably try harder. if [ "$LIBC" = unknown ]; then LIBC=gnu fi ;; esac # Note: order is significant - the case branches are not exclusive. case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \ /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ echo unknown)) case "$UNAME_MACHINE_ARCH" in aarch64eb) machine=aarch64_be-unknown ;; armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,') endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p') machine="${arch}${endian}"-unknown ;; *) machine="$UNAME_MACHINE_ARCH"-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "$UNAME_MACHINE_ARCH" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. 
case "$UNAME_MACHINE_ARCH" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr") ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "$UNAME_VERSION" in Debian*) release='-gnu' ;; *) release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2) ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "$machine-${os}${release}${abi-}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//') echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//') echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//') echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" exit ;; *:MidnightBSD:*:*) echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" exit ;; *:ekkoBSD:*:*) echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" exit ;; *:SolidBSD:*:*) echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" exit ;; *:OS108:*:*) echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE" exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:MirBSD:*:*) echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" exit ;; *:Sortix:*:*) echo "$UNAME_MACHINE"-unknown-sortix exit ;; *:Twizzler:*:*) echo "$UNAME_MACHINE"-unknown-twizzler exit ;; *:Redox:*:*) echo "$UNAME_MACHINE"-unknown-redox exit ;; mips:OSF1:*.*) echo mips-dec-osf1 exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}') ;; *5.*) UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}') ;; esac # According to Compaq, /usr/sbin/psrinfo has 
been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1) case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)" # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? 
trap '' 0 exit $exitcode ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo "$UNAME_MACHINE"-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix"$UNAME_RELEASE" exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "$( (/bin/universe) 2>/dev/null)" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case $(/usr/bin/uname -p) in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')" exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux"$UNAME_RELEASE" exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. 
if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; sun4*:SunOS:*:*) case "$(/usr/bin/arch -k)" in Series*|S4*) UNAME_RELEASE=$(uname -v) ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')" exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos"$UNAME_RELEASE" exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null) test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 case "$(/bin/arch)" in sun3) echo m68k-sun-sunos"$UNAME_RELEASE" ;; sun4) echo sparc-sun-sunos"$UNAME_RELEASE" ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos"$UNAME_RELEASE" exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. 
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint"$UNAME_RELEASE" exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint"$UNAME_RELEASE" exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint"$UNAME_RELEASE" exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint"$UNAME_RELEASE" exit ;; m68k:machten:*:*) echo m68k-apple-machten"$UNAME_RELEASE" exit ;; powerpc:machten:*:*) echo powerpc-apple-machten"$UNAME_RELEASE" exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix"$UNAME_RELEASE" exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix"$UNAME_RELEASE" exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix"$UNAME_RELEASE" exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && dummyarg=$(echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p') && SYSTEM_NAME=$("$dummy" "$dummyarg") && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos"$UNAME_RELEASE" exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | 
Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=$(/usr/bin/uname -p) if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 then if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \ test "$TARGET_BINARY_INTERFACE"x = x then echo m88k-dg-dgux"$UNAME_RELEASE" else echo m88k-dg-dguxbcs"$UNAME_RELEASE" fi else echo i586-dg-dgux"$UNAME_RELEASE" fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')" exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if test -x /usr/bin/oslevel ; then IBM_REV=$(/usr/bin/oslevel) else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }') if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if test -x /usr/bin/lslpp ; then IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/) else IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" fi echo "$IBM_ARCH"-ibm-aix"$IBM_REV" exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') case "$UNAME_MACHINE" in 9000/31?) HP_ARCH=m68000 ;; 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if test -x /usr/bin/getconf; then sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null) sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null) case "$sc_cpu_version" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "$sc_kernel_bits" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if test "$HP_ARCH" = ""; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy") test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if test "$HP_ARCH" = hppa2.0w then set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. 
GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo "$HP_ARCH"-hp-hpux"$HPUX_REV" exit ;; ia64:HP-UX:*:*) HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//') echo ia64-hp-hpux"$HPUX_REV" exit ;; 3050*:HI-UX:*:*) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. */ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if test -x /usr/sbin/sysversion ; then echo "$UNAME_MACHINE"-unknown-osf1mk else echo "$UNAME_MACHINE"-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo 
c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz) FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/') echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///') FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/') echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi"$UNAME_RELEASE" exit ;; *:BSD/OS:*:*) echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" exit ;; arm:FreeBSD:*:*) UNAME_PROCESSOR=$(uname -p) set_cc_for_build if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi else echo 
"${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabihf fi exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=$(/usr/bin/uname -p) case "$UNAME_PROCESSOR" in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" exit ;; i*:CYGWIN*:*) echo "$UNAME_MACHINE"-pc-cygwin exit ;; *:MINGW64*:*) echo "$UNAME_MACHINE"-pc-mingw64 exit ;; *:MINGW*:*) echo "$UNAME_MACHINE"-pc-mingw32 exit ;; *:MSYS*:*) echo "$UNAME_MACHINE"-pc-msys exit ;; i*:PW*:*) echo "$UNAME_MACHINE"-pc-pw32 exit ;; *:Interix*:*) case "$UNAME_MACHINE" in x86) echo i586-pc-interix"$UNAME_RELEASE" exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix"$UNAME_RELEASE" exit ;; IA64) echo ia64-unknown-interix"$UNAME_RELEASE" exit ;; esac ;; i*:UWIN*:*) echo "$UNAME_MACHINE"-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-pc-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')" exit ;; *:GNU:*:*) # the GNU system echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')" exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC" exit ;; *:Minix:*:*) echo "$UNAME_MACHINE"-unknown-minix exit ;; aarch64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; alpha:Linux:*:*) case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump 
--private-headers /bin/sh | grep -q ld.so.1 if test "$?" = 0 ; then LIBC=gnulibc1 ; fi echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; arm*:Linux:*:*) set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi else echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf fi fi exit ;; avr32*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; cris:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; crisv32:Linux:*:*) echo "$UNAME_MACHINE"-axis-linux-"$LIBC" exit ;; e2k:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; frv:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; hexagon:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:Linux:*:*) echo "$UNAME_MACHINE"-pc-linux-"$LIBC" exit ;; ia64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; k1om:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m32r*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; m68*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; mips:Linux:*:* | mips64:Linux:*:*) set_cc_for_build IS_GLIBC=0 test x"${LIBC}" = xgnu && IS_GLIBC=1 sed 's/^ //' << EOF > "$dummy.c" #undef CPU #undef mips #undef mipsel #undef mips64 #undef mips64el #if ${IS_GLIBC} && defined(_ABI64) LIBCABI=gnuabi64 #else #if ${IS_GLIBC} && defined(_ABIN32) LIBCABI=gnuabin32 #else LIBCABI=${LIBC} #endif #endif #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6 CPU=mipsisa64r6 #else #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && 
__mips_isa_rev>=6 CPU=mipsisa32r6 #else #if defined(__mips64) CPU=mips64 #else CPU=mips #endif #endif #endif #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) MIPS_ENDIAN=el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) MIPS_ENDIAN= #else MIPS_ENDIAN= #endif #endif EOF eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')" test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } ;; mips64el:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-"$LIBC" exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; padre:Linux:*:*) echo sparc-unknown-linux-"$LIBC" exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-"$LIBC" exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; *) echo hppa-unknown-linux-"$LIBC" ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-"$LIBC" exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-"$LIBC" exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-"$LIBC" exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-"$LIBC" exit ;; riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" exit ;; sh64*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sh*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; tile*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; vax:Linux:*:*) echo "$UNAME_MACHINE"-dec-linux-"$LIBC" exit ;; x86_64:Linux:*:*) 
set_cc_for_build LIBCABI=$LIBC if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_X32 >/dev/null then LIBCABI="$LIBC"x32 fi fi echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI" exit ;; xtensa*:Linux:*:*) echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo "$UNAME_MACHINE"-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo "$UNAME_MACHINE"-unknown-stop exit ;; i*86:atheos:*:*) echo "$UNAME_MACHINE"-unknown-atheos exit ;; i*86:syllable:*:*) echo "$UNAME_MACHINE"-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos"$UNAME_RELEASE" exit ;; i*86:*DOS:*:*) echo "$UNAME_MACHINE"-pc-msdosdjgpp exit ;; i*86:*:4.*:*) UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//') if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. 
case $(/bin/uname -X | grep "^Machine") in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=$(sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //')) (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" else echo "$UNAME_MACHINE"-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos"$UNAME_RELEASE" exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos"$UNAME_RELEASE" exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos"$UNAME_RELEASE" exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos"$UNAME_RELEASE" exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv"$UNAME_RELEASE" exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 
2>/dev/null >/dev/null ; then UNAME_MACHINE=$( (uname -p) 2>/dev/null) echo "$UNAME_MACHINE"-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo "$UNAME_MACHINE"-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux"$UNAME_RELEASE" exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if test -d /usr/nec; then echo mips-nec-sysv"$UNAME_RELEASE" else echo mips-unknown-sysv"$UNAME_RELEASE" fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux"$UNAME_RELEASE" exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux"$UNAME_RELEASE" exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux"$UNAME_RELEASE" exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux"$UNAME_RELEASE" exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux"$UNAME_RELEASE" exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux"$UNAME_RELEASE" exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux"$UNAME_RELEASE" exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody"$UNAME_RELEASE" exit ;; *:Rhapsody:*:*) echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" exit ;; arm64:Darwin:*:*) echo aarch64-apple-darwin"$UNAME_RELEASE" exit ;; *:Darwin:*:*) UNAME_PROCESSOR=$(uname -p) case $UNAME_PROCESSOR in unknown) UNAME_PROCESSOR=powerpc ;; esac if command -v xcode-select > /dev/null 2> /dev/null && \ ! xcode-select --print-path > /dev/null 2> /dev/null ; then # Avoid executing cc if there is no toolchain installed as # cc will be a stub that puts up a graphical alert # prompting the user to install developer tools. 
CC_FOR_BUILD=no_compiler_found else set_cc_for_build fi if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_PPC >/dev/null then UNAME_PROCESSOR=powerpc fi elif test "$UNAME_PROCESSOR" = i386 ; then # uname -m returns i386 or x86_64 UNAME_PROCESSOR=$UNAME_MACHINE fi echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=$(uname -p) if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-*:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk"$UNAME_RELEASE" exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk"$UNAME_RELEASE" exit ;; NSR-*:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk"$UNAME_RELEASE" exit ;; NSV-*:NONSTOP_KERNEL:*:*) echo nsv-tandem-nsk"$UNAME_RELEASE" exit ;; NSX-*:NONSTOP_KERNEL:*:*) echo nsx-tandem-nsk"$UNAME_RELEASE" exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
# shellcheck disable=SC2154 if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo "$UNAME_MACHINE"-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux"$UNAME_RELEASE" exit ;; *:DragonFly:*:*) echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')" exit ;; *:*VMS:*:*) UNAME_MACHINE=$( (uname -p) 2>/dev/null) case "$UNAME_MACHINE" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')" exit ;; i*86:rdos:*:*) echo "$UNAME_MACHINE"-pc-rdos exit ;; *:AROS:*:*) echo "$UNAME_MACHINE"-unknown-aros exit ;; x86_64:VMkernel:*:*) echo "$UNAME_MACHINE"-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; *:Unleashed:*:*) echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" exit ;; esac # No uname command or uname output not recognized. set_cc_for_build cat > "$dummy.c" < #include #endif #if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) #if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) #include #if defined(_SIZE_T_) || defined(SIGLOST) #include #endif #endif #endif main () { #if defined (sony) #if defined (MIPSEB) /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, I don't know.... 
*/ printf ("mips-sony-bsd\n"); exit (0); #else #include printf ("m68k-sony-newsos%s\n", #ifdef NEWSOS4 "4" #else "" #endif ); exit (0); #endif #endif #if defined (NeXT) #if !defined (__ARCHITECTURE__) #define __ARCHITECTURE__ "m68k" #endif int version; version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null); if (version < 4) printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); else printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); exit (0); #endif #if defined (MULTIMAX) || defined (n16) #if defined (UMAXV) printf ("ns32k-encore-sysv\n"); exit (0); #else #if defined (CMU) printf ("ns32k-encore-mach\n"); exit (0); #else printf ("ns32k-encore-bsd\n"); exit (0); #endif #endif #endif #if defined (__386BSD__) printf ("i386-pc-bsd\n"); exit (0); #endif #if defined (sequent) #if defined (i386) printf ("i386-sequent-dynix\n"); exit (0); #endif #if defined (ns32000) printf ("ns32k-sequent-dynix\n"); exit (0); #endif #endif #if defined (_SEQUENT_) struct utsname un; uname(&un); if (strncmp(un.version, "V2", 2) == 0) { printf ("i386-sequent-ptx2\n"); exit (0); } if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ printf ("i386-sequent-ptx1\n"); exit (0); } printf ("i386-sequent-ptx\n"); exit (0); #endif #if defined (vax) #if !defined (ultrix) #include #if defined (BSD) #if BSD == 43 printf ("vax-dec-bsd4.3\n"); exit (0); #else #if BSD == 199006 printf ("vax-dec-bsd4.3reno\n"); exit (0); #else printf ("vax-dec-bsd\n"); exit (0); #endif #endif #else printf ("vax-dec-bsd\n"); exit (0); #endif #else #if defined(_SIZE_T_) || defined(SIGLOST) struct utsname un; uname (&un); printf ("vax-dec-ultrix%s\n", un.release); exit (0); #else printf ("vax-dec-ultrix\n"); exit (0); #endif #endif #endif #if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__) #if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__) #if defined(_SIZE_T_) || defined(SIGLOST) struct utsname *un; uname (&un); printf ("mips-dec-ultrix%s\n", un.release); exit (0); #else printf ("mips-dec-ultrix\n"); exit (0); #endif #endif #endif #if defined (alliant) && defined (i860) printf ("i860-alliant-bsd\n"); exit (0); #endif exit (1); } EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } echo "$0: unable to guess system type" >&2 case "$UNAME_MACHINE:$UNAME_SYSTEM" in mips:Linux | mips64:Linux) # If we got here on MIPS GNU/Linux, output extra information. 
cat >&2 <&2 <&2 </dev/null || echo unknown) uname -r = $( (uname -r) 2>/dev/null || echo unknown) uname -s = $( (uname -s) 2>/dev/null || echo unknown) uname -v = $( (uname -v) 2>/dev/null || echo unknown) /usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null) /bin/uname -X = $( (/bin/uname -X) 2>/dev/null) hostinfo = $( (hostinfo) 2>/dev/null) /bin/universe = $( (/bin/universe) 2>/dev/null) /usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null) /bin/arch = $( (/bin/arch) 2>/dev/null) /usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null) /usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null) UNAME_MACHINE = "$UNAME_MACHINE" UNAME_RELEASE = "$UNAME_RELEASE" UNAME_SYSTEM = "$UNAME_SYSTEM" UNAME_VERSION = "$UNAME_VERSION" EOF fi exit 1 # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: autoconf-2.71/build-aux/config.sub0000755000000000000000000010315414004621310014020 00000000000000#! /bin/sh # Configuration validation subroutine script. # Copyright 1992-2021 Free Software Foundation, Inc. timestamp='2021-01-08' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . 
# # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # Please send patches to . # # Configuration subroutine to validate and canonicalize a configuration type. # Supply the specified configuration type as an argument. # If it is invalid, we print an error message on stderr and exit with code 1. # Otherwise, we print the canonical config type on stdout and succeed. # You can get the latest version of this script from: # https://git.savannah.gnu.org/cgit/config.git/plain/config.sub # This file is supposed to be the same for all GNU packages # and recognize all the CPU types, system types and aliases # that are meaningful with *any* GNU software. # Each package is responsible for reporting which valid configurations # it does not support. The user should be able to distinguish # a failure to support a valid configuration from a meaningless # configuration. # The goal of this file is to map all the various variations of a given # machine specification into a single specification in the form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM # or in some cases, the newer four-part form: # CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM # It is wrong to echo any other type of specification. me=$(echo "$0" | sed -e 's,.*/,,') usage="\ Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS Canonicalize a configuration name. Options: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.sub ($timestamp) Copyright 1992-2021 Free Software Foundation, Inc. This is free software; see the source for copying conditions. 
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; *local*) # First pass through any local machine types. echo "$1" exit ;; * ) break ;; esac done case $# in 0) echo "$me: missing argument$help" >&2 exit 1;; 1) ;; *) echo "$me: too many arguments$help" >&2 exit 1;; esac # Split fields of configuration type # shellcheck disable=SC2162 IFS="-" read field1 field2 field3 field4 <&2 exit 1 ;; *-*-*-*) basic_machine=$field1-$field2 basic_os=$field3-$field4 ;; *-*-*) # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two # parts maybe_os=$field2-$field3 case $maybe_os in nto-qnx* | linux-* | uclinux-uclibc* \ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ | storm-chaos* | os2-emx* | rtmk-nova*) basic_machine=$field1 basic_os=$maybe_os ;; android-linux) basic_machine=$field1-unknown basic_os=linux-android ;; *) basic_machine=$field1-$field2 basic_os=$field3 ;; esac ;; *-*) # A lone config we happen to match not fitting any pattern case $field1-$field2 in decstation-3100) basic_machine=mips-dec basic_os= ;; *-*) # Second component is usually, but not always the OS case $field2 in # Prevent following clause from handling this valid os sun*os*) basic_machine=$field1 basic_os=$field2 ;; # Manufacturers dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ | unicom* | ibm* | next | hp | isi* | apollo | altos* \ | convergent* | ncr* | news | 32* | 3600* | 3100* \ | hitachi* | c[123]* | convex* | sun | crds | 
omron* | dg \ | ultra | tti* | harris | dolphin | highlevel | gould \ | cbm | ns | masscomp | apple | axis | knuth | cray \ | microblaze* | sim | cisco \ | oki | wec | wrs | winbond) basic_machine=$field1-$field2 basic_os= ;; *) basic_machine=$field1 basic_os=$field2 ;; esac ;; esac ;; *) # Convert single-component short-hands not valid as part of # multi-component configurations. case $field1 in 386bsd) basic_machine=i386-pc basic_os=bsd ;; a29khif) basic_machine=a29k-amd basic_os=udi ;; adobe68k) basic_machine=m68010-adobe basic_os=scout ;; alliant) basic_machine=fx80-alliant basic_os= ;; altos | altos3068) basic_machine=m68k-altos basic_os= ;; am29k) basic_machine=a29k-none basic_os=bsd ;; amdahl) basic_machine=580-amdahl basic_os=sysv ;; amiga) basic_machine=m68k-unknown basic_os= ;; amigaos | amigados) basic_machine=m68k-unknown basic_os=amigaos ;; amigaunix | amix) basic_machine=m68k-unknown basic_os=sysv4 ;; apollo68) basic_machine=m68k-apollo basic_os=sysv ;; apollo68bsd) basic_machine=m68k-apollo basic_os=bsd ;; aros) basic_machine=i386-pc basic_os=aros ;; aux) basic_machine=m68k-apple basic_os=aux ;; balance) basic_machine=ns32k-sequent basic_os=dynix ;; blackfin) basic_machine=bfin-unknown basic_os=linux ;; cegcc) basic_machine=arm-unknown basic_os=cegcc ;; convex-c1) basic_machine=c1-convex basic_os=bsd ;; convex-c2) basic_machine=c2-convex basic_os=bsd ;; convex-c32) basic_machine=c32-convex basic_os=bsd ;; convex-c34) basic_machine=c34-convex basic_os=bsd ;; convex-c38) basic_machine=c38-convex basic_os=bsd ;; cray) basic_machine=j90-cray basic_os=unicos ;; crds | unos) basic_machine=m68k-crds basic_os= ;; da30) basic_machine=m68k-da30 basic_os= ;; decstation | pmax | pmin | dec3100 | decstatn) basic_machine=mips-dec basic_os= ;; delta88) basic_machine=m88k-motorola basic_os=sysv3 ;; dicos) basic_machine=i686-pc basic_os=dicos ;; djgpp) basic_machine=i586-pc basic_os=msdosdjgpp ;; ebmon29k) basic_machine=a29k-amd basic_os=ebmon ;; es1800 | OSE68k | 
ose68k | ose | OSE) basic_machine=m68k-ericsson basic_os=ose ;; gmicro) basic_machine=tron-gmicro basic_os=sysv ;; go32) basic_machine=i386-pc basic_os=go32 ;; h8300hms) basic_machine=h8300-hitachi basic_os=hms ;; h8300xray) basic_machine=h8300-hitachi basic_os=xray ;; h8500hms) basic_machine=h8500-hitachi basic_os=hms ;; harris) basic_machine=m88k-harris basic_os=sysv3 ;; hp300 | hp300hpux) basic_machine=m68k-hp basic_os=hpux ;; hp300bsd) basic_machine=m68k-hp basic_os=bsd ;; hppaosf) basic_machine=hppa1.1-hp basic_os=osf ;; hppro) basic_machine=hppa1.1-hp basic_os=proelf ;; i386mach) basic_machine=i386-mach basic_os=mach ;; isi68 | isi) basic_machine=m68k-isi basic_os=sysv ;; m68knommu) basic_machine=m68k-unknown basic_os=linux ;; magnum | m3230) basic_machine=mips-mips basic_os=sysv ;; merlin) basic_machine=ns32k-utek basic_os=sysv ;; mingw64) basic_machine=x86_64-pc basic_os=mingw64 ;; mingw32) basic_machine=i686-pc basic_os=mingw32 ;; mingw32ce) basic_machine=arm-unknown basic_os=mingw32ce ;; monitor) basic_machine=m68k-rom68k basic_os=coff ;; morphos) basic_machine=powerpc-unknown basic_os=morphos ;; moxiebox) basic_machine=moxie-unknown basic_os=moxiebox ;; msdos) basic_machine=i386-pc basic_os=msdos ;; msys) basic_machine=i686-pc basic_os=msys ;; mvs) basic_machine=i370-ibm basic_os=mvs ;; nacl) basic_machine=le32-unknown basic_os=nacl ;; ncr3000) basic_machine=i486-ncr basic_os=sysv4 ;; netbsd386) basic_machine=i386-pc basic_os=netbsd ;; netwinder) basic_machine=armv4l-rebel basic_os=linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony basic_os=newsos ;; news1000) basic_machine=m68030-sony basic_os=newsos ;; necv70) basic_machine=v70-nec basic_os=sysv ;; nh3000) basic_machine=m68k-harris basic_os=cxux ;; nh[45]000) basic_machine=m88k-harris basic_os=cxux ;; nindy960) basic_machine=i960-intel basic_os=nindy ;; mon960) basic_machine=i960-intel basic_os=mon960 ;; nonstopux) basic_machine=mips-compaq basic_os=nonstopux ;; os400) 
basic_machine=powerpc-ibm basic_os=os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson basic_os=ose ;; os68k) basic_machine=m68k-none basic_os=os68k ;; paragon) basic_machine=i860-intel basic_os=osf ;; parisc) basic_machine=hppa-unknown basic_os=linux ;; psp) basic_machine=mipsallegrexel-sony basic_os=psp ;; pw32) basic_machine=i586-unknown basic_os=pw32 ;; rdos | rdos64) basic_machine=x86_64-pc basic_os=rdos ;; rdos32) basic_machine=i386-pc basic_os=rdos ;; rom68k) basic_machine=m68k-rom68k basic_os=coff ;; sa29200) basic_machine=a29k-amd basic_os=udi ;; sei) basic_machine=mips-sei basic_os=seiux ;; sequent) basic_machine=i386-sequent basic_os= ;; sps7) basic_machine=m68k-bull basic_os=sysv2 ;; st2000) basic_machine=m68k-tandem basic_os= ;; stratus) basic_machine=i860-stratus basic_os=sysv4 ;; sun2) basic_machine=m68000-sun basic_os= ;; sun2os3) basic_machine=m68000-sun basic_os=sunos3 ;; sun2os4) basic_machine=m68000-sun basic_os=sunos4 ;; sun3) basic_machine=m68k-sun basic_os= ;; sun3os3) basic_machine=m68k-sun basic_os=sunos3 ;; sun3os4) basic_machine=m68k-sun basic_os=sunos4 ;; sun4) basic_machine=sparc-sun basic_os= ;; sun4os3) basic_machine=sparc-sun basic_os=sunos3 ;; sun4os4) basic_machine=sparc-sun basic_os=sunos4 ;; sun4sol2) basic_machine=sparc-sun basic_os=solaris2 ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun basic_os= ;; sv1) basic_machine=sv1-cray basic_os=unicos ;; symmetry) basic_machine=i386-sequent basic_os=dynix ;; t3e) basic_machine=alphaev5-cray basic_os=unicos ;; t90) basic_machine=t90-cray basic_os=unicos ;; toad1) basic_machine=pdp10-xkl basic_os=tops20 ;; tpf) basic_machine=s390x-ibm basic_os=tpf ;; udi29k) basic_machine=a29k-amd basic_os=udi ;; ultra3) basic_machine=a29k-nyu basic_os=sym1 ;; v810 | necv810) basic_machine=v810-nec basic_os=none ;; vaxv) basic_machine=vax-dec basic_os=sysv ;; vms) basic_machine=vax-dec basic_os=vms ;; vsta) basic_machine=i386-pc basic_os=vsta ;; vxworks960) basic_machine=i960-wrs 
basic_os=vxworks ;; vxworks68) basic_machine=m68k-wrs basic_os=vxworks ;; vxworks29k) basic_machine=a29k-wrs basic_os=vxworks ;; xbox) basic_machine=i686-pc basic_os=mingw32 ;; ymp) basic_machine=ymp-cray basic_os=unicos ;; *) basic_machine=$1 basic_os= ;; esac ;; esac # Decode 1-component or ad-hoc basic machines case $basic_machine in # Here we handle the default manufacturer of certain CPU types. It is in # some cases the only manufacturer, in others, it is the most popular. w89k) cpu=hppa1.1 vendor=winbond ;; op50n) cpu=hppa1.1 vendor=oki ;; op60c) cpu=hppa1.1 vendor=oki ;; ibm*) cpu=i370 vendor=ibm ;; orion105) cpu=clipper vendor=highlevel ;; mac | mpw | mac-mpw) cpu=m68k vendor=apple ;; pmac | pmac-mpw) cpu=powerpc vendor=apple ;; # Recognize the various machine names and aliases which stand # for a CPU type and a company and sometimes even an OS. 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) cpu=m68000 vendor=att ;; 3b*) cpu=we32k vendor=att ;; bluegene*) cpu=powerpc vendor=ibm basic_os=cnk ;; decsystem10* | dec10*) cpu=pdp10 vendor=dec basic_os=tops10 ;; decsystem20* | dec20*) cpu=pdp10 vendor=dec basic_os=tops20 ;; delta | 3300 | motorola-3300 | motorola-delta \ | 3300-motorola | delta-motorola) cpu=m68k vendor=motorola ;; dpx2*) cpu=m68k vendor=bull basic_os=sysv3 ;; encore | umax | mmax) cpu=ns32k vendor=encore ;; elxsi) cpu=elxsi vendor=elxsi basic_os=${basic_os:-bsd} ;; fx2800) cpu=i860 vendor=alliant ;; genix) cpu=ns32k vendor=ns ;; h3050r* | hiux*) cpu=hppa1.1 vendor=hitachi basic_os=hiuxwe2 ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; hp9k2[0-9][0-9] | hp9k31[0-9]) cpu=m68000 vendor=hp ;; hp9k3[2-9][0-9]) cpu=m68k vendor=hp ;; hp9k6[0-9][0-9] | hp6[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; hp9k7[0-79][0-9] | hp7[0-79][0-9]) cpu=hppa1.1 vendor=hp ;; hp9k78[0-9] | hp78[0-9]) # FIXME: really hppa2.0-hp cpu=hppa1.1 vendor=hp ;; hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) # FIXME: 
really hppa2.0-hp cpu=hppa1.1 vendor=hp ;; hp9k8[0-9][13679] | hp8[0-9][13679]) cpu=hppa1.1 vendor=hp ;; hp9k8[0-9][0-9] | hp8[0-9][0-9]) cpu=hppa1.0 vendor=hp ;; i*86v32) cpu=$(echo "$1" | sed -e 's/86.*/86/') vendor=pc basic_os=sysv32 ;; i*86v4*) cpu=$(echo "$1" | sed -e 's/86.*/86/') vendor=pc basic_os=sysv4 ;; i*86v) cpu=$(echo "$1" | sed -e 's/86.*/86/') vendor=pc basic_os=sysv ;; i*86sol2) cpu=$(echo "$1" | sed -e 's/86.*/86/') vendor=pc basic_os=solaris2 ;; j90 | j90-cray) cpu=j90 vendor=cray basic_os=${basic_os:-unicos} ;; iris | iris4d) cpu=mips vendor=sgi case $basic_os in irix*) ;; *) basic_os=irix4 ;; esac ;; miniframe) cpu=m68000 vendor=convergent ;; *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) cpu=m68k vendor=atari basic_os=mint ;; news-3600 | risc-news) cpu=mips vendor=sony basic_os=newsos ;; next | m*-next) cpu=m68k vendor=next case $basic_os in openstep*) ;; nextstep*) ;; ns2*) basic_os=nextstep2 ;; *) basic_os=nextstep3 ;; esac ;; np1) cpu=np1 vendor=gould ;; op50n-* | op60c-*) cpu=hppa1.1 vendor=oki basic_os=proelf ;; pa-hitachi) cpu=hppa1.1 vendor=hitachi basic_os=hiuxwe2 ;; pbd) cpu=sparc vendor=tti ;; pbb) cpu=m68k vendor=tti ;; pc532) cpu=ns32k vendor=pc532 ;; pn) cpu=pn vendor=gould ;; power) cpu=power vendor=ibm ;; ps2) cpu=i386 vendor=ibm ;; rm[46]00) cpu=mips vendor=siemens ;; rtpc | rtpc-*) cpu=romp vendor=ibm ;; sde) cpu=mipsisa32 vendor=sde basic_os=${basic_os:-elf} ;; simso-wrs) cpu=sparclite vendor=wrs basic_os=vxworks ;; tower | tower-32) cpu=m68k vendor=ncr ;; vpp*|vx|vx-*) cpu=f301 vendor=fujitsu ;; w65) cpu=w65 vendor=wdc ;; w89k-*) cpu=hppa1.1 vendor=winbond basic_os=proelf ;; none) cpu=none vendor=none ;; leon|leon[3-9]) cpu=sparc vendor=$basic_machine ;; leon-*|leon[3-9]-*) cpu=sparc vendor=$(echo "$basic_machine" | sed 's/-.*//') ;; *-*) # shellcheck disable=SC2162 IFS="-" read cpu vendor <&2 exit 1 ;; esac ;; esac # Here we canonicalize certain aliases for manufacturers. 
case $vendor in digital*) vendor=dec ;; commodore*) vendor=cbm ;; *) ;; esac # Decode manufacturer-specific aliases for certain operating systems. if test x$basic_os != x then # First recognize some ad-hoc caes, or perhaps split kernel-os, or else just # set os. case $basic_os in gnu/linux*) kernel=linux os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|') ;; os2-emx) kernel=os2 os=$(echo $basic_os | sed -e 's|os2-emx|emx|') ;; nto-qnx*) kernel=nto os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|') ;; *-*) # shellcheck disable=SC2162 IFS="-" read kernel os <&2 exit 1 ;; esac # As a final step for OS-related things, validate the OS-kernel combination # (given a valid OS), if there is a kernel. case $kernel-$os in linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* ) ;; uclinux-uclibc* ) ;; -dietlibc* | -newlib* | -musl* | -uclibc* ) # These are just libc implementations, not actual OSes, and thus # require a kernel. echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2 exit 1 ;; kfreebsd*-gnu* | kopensolaris*-gnu*) ;; vxworks-simlinux | vxworks-simwindows | vxworks-spe) ;; nto-qnx*) ;; os2-emx) ;; *-eabi* | *-gnueabi*) ;; -*) # Blank kernel with real OS is always fine. ;; *-*) echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2 exit 1 ;; esac # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. 
case $vendor in unknown) case $cpu-$os in *-riscix*) vendor=acorn ;; *-sunos*) vendor=sun ;; *-cnk* | *-aix*) vendor=ibm ;; *-beos*) vendor=be ;; *-hpux*) vendor=hp ;; *-mpeix*) vendor=hp ;; *-hiux*) vendor=hitachi ;; *-unos*) vendor=crds ;; *-dgux*) vendor=dg ;; *-luna*) vendor=omron ;; *-genix*) vendor=ns ;; *-clix*) vendor=intergraph ;; *-mvs* | *-opened*) vendor=ibm ;; *-os400*) vendor=ibm ;; s390-* | s390x-*) vendor=ibm ;; *-ptx*) vendor=sequent ;; *-tpf*) vendor=ibm ;; *-vxsim* | *-vxworks* | *-windiss*) vendor=wrs ;; *-aux*) vendor=apple ;; *-hms*) vendor=hitachi ;; *-mpw* | *-macos*) vendor=apple ;; *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) vendor=atari ;; *-vos*) vendor=stratus ;; esac ;; esac echo "$cpu-$vendor-${kernel:+$kernel-}$os" exit # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: autoconf-2.71/build-aux/install-sh0000755000000000000000000003577613765663120014101 00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2020-11-14.01; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. # # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. tab=' ' nl=' ' IFS=" $tab$nl" # Set DOITPROG to "echo" to test this script. doit=${DOITPROG-} doit_exec=${doit:-exec} # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_mkdir= # Desired mode of installed file. mode=0755 # Create dirs (including intermediate dirs) using mode 755. # This is like GNU 'install' as of coreutils 8.32 (2020). mkdir_umask=22 backupsuffix= chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false is_target_a_directory=possibly usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... 
-d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -p pass -p to $cpprog. -s $stripprog installed files. -S SUFFIX attempt to back up existing files, with suffix SUFFIX. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG By default, rm is invoked with -f; when overridden with RMPROG, it's up to you to specify -f if you want it. If -S is not specified, no backups are attempted. Email bug reports to bug-automake@gnu.org. Automake home page: https://www.gnu.org/software/automake/ " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -p) cpprog="$cpprog -p";; -s) stripcmd=$stripprog;; -S) backupsuffix="$2" shift;; -t) is_target_a_directory=always dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) is_target_a_directory=never;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done # We allow the use of options -d and -T together, by making -d # take the precedence; this is for compatibility with GNU install. 
if test -n "$dir_arg"; then if test -n "$dst_arg"; then echo "$0: target directory not allowed when installing a directory." >&2 exit 1 fi fi if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. # This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then if test $# -gt 1 || test "$is_target_a_directory" = always; then if test ! -d "$dst_arg"; then echo "$0: $dst_arg: Is not a directory." >&2 exit 1 fi fi fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? # Don't chown directories that already exist. 
if test $dstdir_status = 0; then chowncmd="" fi else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename. if test -d "$dst"; then if test "$is_target_a_directory" = never; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dstbase=`basename "$src"` case $dst in */) dst=$dst$dstbase;; *) dst=$dst/$dstbase;; esac dstdir_status=0 else dstdir=`dirname "$dst"` test -d "$dstdir" dstdir_status=$? fi fi case $dstdir in */) dstdirslash=$dstdir;; *) dstdirslash=$dstdir/;; esac obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false # The $RANDOM variable is not portable (e.g., dash). Use it # here however when possible just to lower collision chance. tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap ' ret=$? rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null exit $ret ' 0 # Because "mkdir -p" follows existing symlinks and we likely work # directly in world-writeable /tmp, make sure that the '$tmpdir' # directory is successfully created first before we actually test # 'mkdir -p'. if (umask $mkdir_umask && $mkdirprog $mkdir_mode "$tmpdir" && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. 
test_tmpdir="$tmpdir/a" ls_ld_tmpdir=`ls -ld "$test_tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null fi trap '' 0;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac oIFS=$IFS IFS=/ set -f set fnord $dstdir shift set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. 
dsttmp=${dstdirslash}_inst.$$_ rmtmp=${dstdirslash}_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && { test -z "$stripcmd" || { # Create $dsttmp read-write so that cp doesn't create it read-only, # which would cause strip to fail. if test -z "$doit"; then : >"$dsttmp" # No need to fork-exec 'touch'. else $doit touch "$dsttmp" fi } } && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # If $backupsuffix is set, and the file being installed # already exists, attempt a backup. Don't worry if it fails, # e.g., if mv doesn't support -f. if test -n "$backupsuffix" && test -f "$dst"; then $doit $mvcmd -f "$dst" "$dst$backupsuffix" 2>/dev/null fi # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. 
# We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. $doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: autoconf-2.71/build-aux/mdate-sh0000755000000000000000000001373214004623432013476 00000000000000#!/bin/sh # Get modification time of a file or directory and pretty-print it. scriptversion=2018-03-07.03; # UTC # Copyright (C) 1995-2020 Free Software Foundation, Inc. # written by Ulrich Drepper , June 1995 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. 
# This file is maintained in Automake, please report # bugs to or send patches to # . if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST fi case $1 in '') echo "$0: No file. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: mdate-sh [--help] [--version] FILE Pretty-print the modification day of FILE, in the format: 1 January 1970 Report bugs to . EOF exit $? ;; -v | --v*) echo "mdate-sh $scriptversion" exit $? ;; esac error () { echo "$0: $1" >&2 exit 1 } # Prevent date giving response in another language. LANG=C export LANG LC_ALL=C export LC_ALL LC_TIME=C export LC_TIME # Use UTC to get reproducible result. TZ=UTC0 export TZ # GNU ls changes its time format in response to the TIME_STYLE # variable. Since we cannot assume 'unset' works, revert this # variable to its documented default. if test "${TIME_STYLE+set}" = set; then TIME_STYLE=posix-long-iso export TIME_STYLE fi save_arg1=$1 # Find out how to get the extended ls output of a file or directory. if ls -L /dev/null 1>/dev/null 2>&1; then ls_command='ls -L -l -d' else ls_command='ls -l -d' fi # Avoid user/group names that might have spaces, when possible. if ls -n /dev/null 1>/dev/null 2>&1; then ls_command="$ls_command -n" fi # A 'ls -l' line looks as follows on OS/2. # drwxrwx--- 0 Aug 11 2001 foo # This differs from Unix, which adds ownership information. # drwxrwx--- 2 root root 4096 Aug 11 2001 foo # # To find the date, we split the line on spaces and iterate on words # until we find a month. This cannot work with files whose owner is a # user named "Jan", or "Feb", etc. However, it's unlikely that '/' # will be owned by a user whose name is a month. 
So we first look at # the extended ls output of the root directory to decide how many # words should be skipped to get the date. # On HPUX /bin/sh, "set" interprets "-rw-r--r--" as options, so the "x" below. set x`$ls_command /` # Find which argument is the month. month= command= until test $month do test $# -gt 0 || error "failed parsing '$ls_command /' output" shift # Add another shift to the command. command="$command shift;" case $1 in Jan) month=January; nummonth=1;; Feb) month=February; nummonth=2;; Mar) month=March; nummonth=3;; Apr) month=April; nummonth=4;; May) month=May; nummonth=5;; Jun) month=June; nummonth=6;; Jul) month=July; nummonth=7;; Aug) month=August; nummonth=8;; Sep) month=September; nummonth=9;; Oct) month=October; nummonth=10;; Nov) month=November; nummonth=11;; Dec) month=December; nummonth=12;; esac done test -n "$month" || error "failed parsing '$ls_command /' output" # Get the extended ls output of the file or directory. set dummy x`eval "$ls_command \"\\\$save_arg1\""` # Remove all preceding arguments eval $command # Because of the dummy argument above, month is in $2. # # On a POSIX system, we should have # # $# = 5 # $1 = file size # $2 = month # $3 = day # $4 = year or time # $5 = filename # # On Darwin 7.7.0 and 7.6.0, we have # # $# = 4 # $1 = day # $2 = month # $3 = year or time # $4 = filename # Get the month. case $2 in Jan) month=January; nummonth=1;; Feb) month=February; nummonth=2;; Mar) month=March; nummonth=3;; Apr) month=April; nummonth=4;; May) month=May; nummonth=5;; Jun) month=June; nummonth=6;; Jul) month=July; nummonth=7;; Aug) month=August; nummonth=8;; Sep) month=September; nummonth=9;; Oct) month=October; nummonth=10;; Nov) month=November; nummonth=11;; Dec) month=December; nummonth=12;; esac case $3 in ???*) day=$1;; *) day=$3; shift;; esac # Here we have to deal with the problem that the ls output gives either # the time of day or the year. 
case $3 in *:*) set `date`; eval year=\$$# case $2 in Jan) nummonthtod=1;; Feb) nummonthtod=2;; Mar) nummonthtod=3;; Apr) nummonthtod=4;; May) nummonthtod=5;; Jun) nummonthtod=6;; Jul) nummonthtod=7;; Aug) nummonthtod=8;; Sep) nummonthtod=9;; Oct) nummonthtod=10;; Nov) nummonthtod=11;; Dec) nummonthtod=12;; esac # For the first six month of the year the time notation can also # be used for files modified in the last year. if (expr $nummonth \> $nummonthtod) > /dev/null; then year=`expr $year - 1` fi;; *) year=$3;; esac # The result. echo $day $month $year # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: autoconf-2.71/build-aux/missing0000755000000000000000000001533614004623432013447 00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2018-03-07.03; # UTC # Copyright (C) 1996-2020 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. 
if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due to PROGRAM being missing or too old. Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. test $st -eq 0 && exit 0 # Also exit now if we it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. 
exit $st fi perl_URL=https://www.perl.org/ flex_URL=https://github.com/westes/flex gnu_software_URL=https://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'autom4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." 
echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). exit $st # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: autoconf-2.71/build-aux/texinfo.tex0000644000000000000000000134474213765663120014270 00000000000000% texinfo.tex -- TeX macros to handle Texinfo files. % % Load plain if necessary, i.e., if running under initex. \expandafter\ifx\csname fmtname\endcsname\relax\input plain\fi % \def\texinfoversion{2020-11-25.18} % % Copyright 1985, 1986, 1988, 1990-2020 Free Software Foundation, Inc. 
% % This texinfo.tex file is free software: you can redistribute it and/or % modify it under the terms of the GNU General Public License as % published by the Free Software Foundation, either version 3 of the % License, or (at your option) any later version. % % This texinfo.tex file is distributed in the hope that it will be % useful, but WITHOUT ANY WARRANTY; without even the implied warranty % of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU % General Public License for more details. % % You should have received a copy of the GNU General Public License % along with this program. If not, see . % % As a special exception, when this file is read by TeX when processing % a Texinfo source document, you may use the result without % restriction. This Exception is an additional permission under section 7 % of the GNU General Public License, version 3 ("GPLv3"). % % Please try the latest version of texinfo.tex before submitting bug % reports; you can get the latest version from: % https://ftp.gnu.org/gnu/texinfo/ (the Texinfo release area), or % https://ftpmirror.gnu.org/texinfo/ (same, via a mirror), or % https://www.gnu.org/software/texinfo/ (the Texinfo home page) % The texinfo.tex in any given distribution could well be out % of date, so if that's what you're using, please check. % % Send bug reports to bug-texinfo@gnu.org. Please include a % complete document in each bug report with which we can reproduce the % problem. Patches are, of course, greatly appreciated. % % To process a Texinfo manual with TeX, it's most reliable to use the % texi2dvi shell script that comes with the distribution. For a simple % manual foo.texi, however, you can get away with this: % tex foo.texi % texindex foo.?? % tex foo.texi % tex foo.texi % dvips foo.dvi -o # or whatever; this makes foo.ps. % The extra TeX runs get the cross-reference information correct. 
% Sometimes one run after texindex suffices, and sometimes you need more % than two; texi2dvi does it as many times as necessary. % % It is possible to adapt texinfo.tex for other languages, to some % extent. You can get the existing language-specific files from the % full Texinfo distribution. % % The GNU Texinfo home page is https://www.gnu.org/software/texinfo. \message{Loading texinfo [version \texinfoversion]:} % If in a .fmt file, print the version number % and turn on active characters that we couldn't do earlier because % they might have appeared in the input file name. \everyjob{\message{[Texinfo version \texinfoversion]}% \catcode`+=\active \catcode`\_=\active} % LaTeX's \typeout. This ensures that the messages it is used for % are identical in format to the corresponding ones from latex/pdflatex. \def\typeout{\immediate\write17}% \chardef\other=12 % We never want plain's \outer definition of \+ in Texinfo. % For @tex, we can use \tabalign. \let\+ = \relax % Save some plain tex macros whose names we will redefine. \let\ptexb=\b \let\ptexbullet=\bullet \let\ptexc=\c \let\ptexcomma=\, \let\ptexdot=\. \let\ptexdots=\dots \let\ptexend=\end \let\ptexequiv=\equiv \let\ptexexclam=\! \let\ptexfootnote=\footnote \let\ptexgtr=> \let\ptexhat=^ \let\ptexi=\i \let\ptexindent=\indent \let\ptexinsert=\insert \let\ptexlbrace=\{ \let\ptexless=< \let\ptexnewwrite\newwrite \let\ptexnoindent=\noindent \let\ptexplus=+ \let\ptexraggedright=\raggedright \let\ptexrbrace=\} \let\ptexslash=\/ \let\ptexsp=\sp \let\ptexstar=\* \let\ptexsup=\sup \let\ptext=\t \let\ptextop=\top {\catcode`\'=\active \global\let\ptexquoteright'}% active in plain's math mode % If this character appears in an error message or help string, it % starts a new line in the output. \newlinechar = `^^J % Use TeX 3.0's \inputlineno to get the line number, for better error % messages, but if we're using an old version of TeX, don't do anything. % \ifx\inputlineno\thisisundefined \let\linenumber = \empty % Pre-3.0. 
\else \def\linenumber{l.\the\inputlineno:\space} \fi % Set up fixed words for English if not already set. \ifx\putwordAppendix\undefined \gdef\putwordAppendix{Appendix}\fi \ifx\putwordChapter\undefined \gdef\putwordChapter{Chapter}\fi \ifx\putworderror\undefined \gdef\putworderror{error}\fi \ifx\putwordfile\undefined \gdef\putwordfile{file}\fi \ifx\putwordin\undefined \gdef\putwordin{in}\fi \ifx\putwordIndexIsEmpty\undefined \gdef\putwordIndexIsEmpty{(Index is empty)}\fi \ifx\putwordIndexNonexistent\undefined \gdef\putwordIndexNonexistent{(Index is nonexistent)}\fi \ifx\putwordInfo\undefined \gdef\putwordInfo{Info}\fi \ifx\putwordInstanceVariableof\undefined \gdef\putwordInstanceVariableof{Instance Variable of}\fi \ifx\putwordMethodon\undefined \gdef\putwordMethodon{Method on}\fi \ifx\putwordNoTitle\undefined \gdef\putwordNoTitle{No Title}\fi \ifx\putwordof\undefined \gdef\putwordof{of}\fi \ifx\putwordon\undefined \gdef\putwordon{on}\fi \ifx\putwordpage\undefined \gdef\putwordpage{page}\fi \ifx\putwordsection\undefined \gdef\putwordsection{section}\fi \ifx\putwordSection\undefined \gdef\putwordSection{Section}\fi \ifx\putwordsee\undefined \gdef\putwordsee{see}\fi \ifx\putwordSee\undefined \gdef\putwordSee{See}\fi \ifx\putwordShortTOC\undefined \gdef\putwordShortTOC{Short Contents}\fi \ifx\putwordTOC\undefined \gdef\putwordTOC{Table of Contents}\fi % \ifx\putwordMJan\undefined \gdef\putwordMJan{January}\fi \ifx\putwordMFeb\undefined \gdef\putwordMFeb{February}\fi \ifx\putwordMMar\undefined \gdef\putwordMMar{March}\fi \ifx\putwordMApr\undefined \gdef\putwordMApr{April}\fi \ifx\putwordMMay\undefined \gdef\putwordMMay{May}\fi \ifx\putwordMJun\undefined \gdef\putwordMJun{June}\fi \ifx\putwordMJul\undefined \gdef\putwordMJul{July}\fi \ifx\putwordMAug\undefined \gdef\putwordMAug{August}\fi \ifx\putwordMSep\undefined \gdef\putwordMSep{September}\fi \ifx\putwordMOct\undefined \gdef\putwordMOct{October}\fi \ifx\putwordMNov\undefined \gdef\putwordMNov{November}\fi 
\ifx\putwordMDec\undefined \gdef\putwordMDec{December}\fi % \ifx\putwordDefmac\undefined \gdef\putwordDefmac{Macro}\fi \ifx\putwordDefspec\undefined \gdef\putwordDefspec{Special Form}\fi \ifx\putwordDefvar\undefined \gdef\putwordDefvar{Variable}\fi \ifx\putwordDefopt\undefined \gdef\putwordDefopt{User Option}\fi \ifx\putwordDeffunc\undefined \gdef\putwordDeffunc{Function}\fi % Give the space character the catcode for a space. \def\spaceisspace{\catcode`\ =10\relax} % Likewise for ^^M, the end of line character. \def\endlineisspace{\catcode13=10\relax} \chardef\dashChar = `\- \chardef\slashChar = `\/ \chardef\underChar = `\_ % Ignore a token. % \def\gobble#1{} % The following is used inside several \edef's. \def\makecsname#1{\expandafter\noexpand\csname#1\endcsname} % Hyphenation fixes. \hyphenation{ Flor-i-da Ghost-script Ghost-view Mac-OS Post-Script ap-pen-dix bit-map bit-maps data-base data-bases eshell fall-ing half-way long-est man-u-script man-u-scripts mini-buf-fer mini-buf-fers over-view par-a-digm par-a-digms rath-er rec-tan-gu-lar ro-bot-ics se-vere-ly set-up spa-ces spell-ing spell-ings stand-alone strong-est time-stamp time-stamps which-ever white-space wide-spread wrap-around } % Sometimes it is convenient to have everything in the transcript file % and nothing on the terminal. We don't just call \tracingall here, % since that produces some useless output on the terminal. We also make % some effort to order the tracing commands to reduce output in the log % file; cf. trace.sty in LaTeX. 
% \def\gloggingall{\begingroup \globaldefs = 1 \loggingall \endgroup}% \def\loggingall{% \tracingstats2 \tracingpages1 \tracinglostchars2 % 2 gives us more in etex \tracingparagraphs1 \tracingoutput1 \tracingmacros2 \tracingrestores1 \showboxbreadth\maxdimen \showboxdepth\maxdimen \ifx\eTeXversion\thisisundefined\else % etex gives us more logging \tracingscantokens1 \tracingifs1 \tracinggroups1 \tracingnesting2 \tracingassigns1 \fi \tracingcommands3 % 3 gives us more in etex \errorcontextlines16 }% % @errormsg{MSG}. Do the index-like expansions on MSG, but if things % aren't perfect, it's not the end of the world, being an error message, % after all. % \def\errormsg{\begingroup \indexnofonts \doerrormsg} \def\doerrormsg#1{\errmessage{#1}} % add check for \lastpenalty to plain's definitions. If the last thing % we did was a \nobreak, we don't want to insert more space. % \def\smallbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\smallskipamount \removelastskip\penalty-50\smallskip\fi\fi} \def\medbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\medskipamount \removelastskip\penalty-100\medskip\fi\fi} \def\bigbreak{\ifnum\lastpenalty<10000\par\ifdim\lastskip<\bigskipamount \removelastskip\penalty-200\bigskip\fi\fi} % Output routine % % For a final copy, take out the rectangles % that mark overfull boxes (in case you have decided % that the text looks ok even though it passes the margin). % \def\finalout{\overfullrule=0pt } \newdimen\outerhsize \newdimen\outervsize % set by the paper size routines \newdimen\topandbottommargin \topandbottommargin=.75in % Output a mark which sets \thischapter, \thissection and \thiscolor. % We dump everything together because we only have one kind of mark. % This works because we only use \botmark / \topmark, not \firstmark. % % A mark contains a subexpression of the \ifcase ... \fi construct. % \get*marks macros below extract the needed part using \ifcase. 
% % Another complication is to let the user choose whether \thischapter % (\thissection) refers to the chapter (section) in effect at the top % of a page, or that at the bottom of a page. % \domark is called twice inside \chapmacro, to add one % mark before the section break, and one after. % In the second call \prevchapterdefs is the same as \currentchapterdefs, % and \prevsectiondefs is the same as \currentsectiondefs. % Then if the page is not broken at the mark, some of the previous % section appears on the page, and we can get the name of this section % from \firstmark for @everyheadingmarks top. % @everyheadingmarks bottom uses \botmark. % % See page 260 of The TeXbook. \def\domark{% \toks0=\expandafter{\currentchapterdefs}% \toks2=\expandafter{\currentsectiondefs}% \toks4=\expandafter{\prevchapterdefs}% \toks6=\expandafter{\prevsectiondefs}% \toks8=\expandafter{\currentcolordefs}% \mark{% \the\toks0 \the\toks2 % 0: marks for @everyheadingmarks top \noexpand\or \the\toks4 \the\toks6 % 1: for @everyheadingmarks bottom \noexpand\else \the\toks8 % 2: color marks }% } % \gettopheadingmarks, \getbottomheadingmarks, % \getcolormarks - extract needed part of mark. % % \topmark doesn't work for the very first chapter (after the title % page or the contents), so we use \firstmark there -- this gets us % the mark with the chapter defs, unless the user sneaks in, e.g., % @setcolor (or @url, or @link, etc.) between @contents and the very % first @chapter. \def\gettopheadingmarks{% \ifcase0\the\savedtopmark\fi \ifx\thischapter\empty \ifcase0\firstmark\fi \fi } \def\getbottomheadingmarks{\ifcase1\botmark\fi} \def\getcolormarks{\ifcase2\the\savedtopmark\fi} % Avoid "undefined control sequence" errors. \def\currentchapterdefs{} \def\currentsectiondefs{} \def\currentsection{} \def\prevchapterdefs{} \def\prevsectiondefs{} \def\currentcolordefs{} % Margin to add to right of even pages, to left of odd pages. 
\newdimen\bindingoffset \newdimen\normaloffset \newdimen\txipagewidth \newdimen\txipageheight % Main output routine. % \chardef\PAGE = 255 \newtoks\defaultoutput \defaultoutput = {\savetopmark\onepageout{\pagecontents\PAGE}} \output=\expandafter{\the\defaultoutput} \newbox\headlinebox \newbox\footlinebox % When outputting the double column layout for indices, an output routine % is run several times, which hides the original value of \topmark. This % can lead to a page heading being output and duplicating the chapter heading % of the index. Hence, save the contents of \topmark at the beginning of % the output routine. The saved contents are valid until we actually % \shipout a page. % % (We used to run a short output routine to actually set \topmark and % \firstmark to the right values, but if this was called with an empty page % containing whatsits for writing index entries, the whatsits would be thrown % away and the index auxiliary file would remain empty.) % \newtoks\savedtopmark \newif\iftopmarksaved \topmarksavedtrue \def\savetopmark{% \iftopmarksaved\else \global\savedtopmark=\expandafter{\topmark}% \global\topmarksavedtrue \fi } % \onepageout takes a vbox as an argument. % \shipout a vbox for a single page, adding an optional header, footer % and footnote. This also causes index entries for this page to be written % to the auxiliary files. % \def\onepageout#1{% \hoffset=\normaloffset % \ifodd\pageno \advance\hoffset by \bindingoffset \else \advance\hoffset by -\bindingoffset\fi % \checkchapterpage % % Retrieve the information for the headings from the marks in the page, % and call Plain TeX's \makeheadline and \makefootline, which use the % values in \headline and \footline. % % Common context changes for both heading and footing. % Do this outside of the \shipout so @code etc. will be expanded in % the headline as they should be, not taken literally (outputting ''code). 
\def\commonheadfootline{\let\hsize=\txipagewidth \texinfochars} % \ifodd\pageno \getoddheadingmarks \else \getevenheadingmarks \fi \global\setbox\headlinebox = \vbox{\commonheadfootline \makeheadline}% \ifodd\pageno \getoddfootingmarks \else \getevenfootingmarks \fi \global\setbox\footlinebox = \vbox{\commonheadfootline \makefootline}% % {% % Set context for writing to auxiliary files like index files. % Have to do this stuff outside the \shipout because we want it to % take effect in \write's, yet the group defined by the \vbox ends % before the \shipout runs. % \atdummies % don't expand commands in the output. \turnoffactive \shipout\vbox{% % Do this early so pdf references go to the beginning of the page. \ifpdfmakepagedest \pdfdest name{\the\pageno} xyz\fi % \unvbox\headlinebox \pagebody{#1}% \ifdim\ht\footlinebox > 0pt % Only leave this space if the footline is nonempty. % (We lessened \vsize for it in \oddfootingyyy.) % The \baselineskip=24pt in plain's \makefootline has no effect. \vskip 24pt \unvbox\footlinebox \fi % }% }% \global\topmarksavedfalse \advancepageno \ifnum\outputpenalty>-20000 \else\dosupereject\fi } \newinsert\margin \dimen\margin=\maxdimen % Main part of page, including any footnotes \def\pagebody#1{\vbox to\txipageheight{\boxmaxdepth=\maxdepth #1}} {\catcode`\@ =11 \gdef\pagecontents#1{\ifvoid\topins\else\unvbox\topins\fi % marginal hacks, juha@viisa.uucp (Juha Takala) \ifvoid\margin\else % marginal info is present \rlap{\kern\hsize\vbox to\z@{\kern1pt\box\margin \vss}}\fi \dimen@=\dp#1\relax \unvbox#1\relax \ifvoid\footins\else\vskip\skip\footins\footnoterule \unvbox\footins\fi \ifr@ggedbottom \kern-\dimen@ \vfil \fi} } % Check if we are on the first page of a chapter. Used for printing headings. 
\newif\ifchapterpage \def\checkchapterpage{% % Get the chapter that was current at the end of the last page \ifcase1\the\savedtopmark\fi \let\prevchaptername\thischaptername % \ifodd\pageno \getoddheadingmarks \else \getevenheadingmarks \fi \let\curchaptername\thischaptername % \ifx\curchaptername\prevchaptername \chapterpagefalse \else \chapterpagetrue \fi } % Argument parsing % Parse an argument, then pass it to #1. The argument is the rest of % the input line (except we remove a trailing comment). #1 should be a % macro which expects an ordinary undelimited TeX argument. % For example, \def\foo{\parsearg\fooxxx}. % \def\parsearg{\parseargusing{}} \def\parseargusing#1#2{% \def\argtorun{#2}% \begingroup \obeylines \spaceisspace #1% \parseargline\empty% Insert the \empty token, see \finishparsearg below. } {\obeylines % \gdef\parseargline#1^^M{% \endgroup % End of the group started in \parsearg. \argremovecomment #1\comment\ArgTerm% }% } % First remove any @comment, then any @c comment. Pass the result on to % \argcheckspaces. \def\argremovecomment#1\comment#2\ArgTerm{\argremovec #1\c\ArgTerm} \def\argremovec#1\c#2\ArgTerm{\argcheckspaces#1\^^M\ArgTerm} % Each occurrence of `\^^M' or `\^^M' is replaced by a single space. % % \argremovec might leave us with trailing space, e.g., % @end itemize @c foo % This space token undergoes the same procedure and is eventually removed % by \finishparsearg. % \def\argcheckspaces#1\^^M{\argcheckspacesX#1\^^M \^^M} \def\argcheckspacesX#1 \^^M{\argcheckspacesY#1\^^M} \def\argcheckspacesY#1\^^M#2\^^M#3\ArgTerm{% \def\temp{#3}% \ifx\temp\empty % Do not use \next, perhaps the caller of \parsearg uses it; reuse \temp: \let\temp\finishparsearg \else \let\temp\argcheckspaces \fi % Put the space token in: \temp#1 #3\ArgTerm } % If a _delimited_ argument is enclosed in braces, they get stripped; so % to get _exactly_ the rest of the line, we had to prevent such situation. 
% We prepended an \empty token at the very beginning and we expand it now, % just before passing the control to \argtorun. % (Similarly, we have to think about #3 of \argcheckspacesY above: it is % either the null string, or it ends with \^^M---thus there is no danger % that a pair of braces would be stripped. % % But first, we have to remove the trailing space token. % \def\finishparsearg#1 \ArgTerm{\expandafter\argtorun\expandafter{#1}} % \parseargdef - define a command taking an argument on the line % % \parseargdef\foo{...} % is roughly equivalent to % \def\foo{\parsearg\Xfoo} % \def\Xfoo#1{...} \def\parseargdef#1{% \expandafter \doparseargdef \csname\string#1\endcsname #1% } \def\doparseargdef#1#2{% \def#2{\parsearg#1}% \def#1##1% } % Several utility definitions with active space: { \obeyspaces \gdef\obeyedspace{ } % Make each space character in the input produce a normal interword % space in the output. Don't allow a line break at this space, as this % is used only in environments like @example, where each line of input % should produce a line of output anyway. % \gdef\sepspaces{\obeyspaces\let =\tie} % If an index command is used in an @example environment, any spaces % therein should become regular spaces in the raw index file, not the % expansion of \tie (\leavevmode \penalty \@M \ ). \gdef\unsepspaces{\let =\space} } \def\flushcr{\ifx\par\lisppar \def\next##1{}\else \let\next=\relax \fi \next} % Define the framework for environments in texinfo.tex. It's used like this: % % \envdef\foo{...} % \def\Efoo{...} % % It's the responsibility of \envdef to insert \begingroup before the % actual body; @end closes the group after calling \Efoo. \envdef also % defines \thisenv, so the current environment is known; @end checks % whether the environment name matches. The \checkenv macro can also be % used to check whether the current environment is the one expected. 
% % Non-false conditionals (@iftex, @ifset) don't fit into this, so they % are not treated as environments; they don't open a group. (The % implementation of @end takes care not to call \endgroup in this % special case.) % At run-time, environments start with this: \def\startenvironment#1{\begingroup\def\thisenv{#1}} % initialize \let\thisenv\empty % ... but they get defined via ``\envdef\foo{...}'': \long\def\envdef#1#2{\def#1{\startenvironment#1#2}} \def\envparseargdef#1#2{\parseargdef#1{\startenvironment#1#2}} % Check whether we're in the right environment: \def\checkenv#1{% \def\temp{#1}% \ifx\thisenv\temp \else \badenverr \fi } % Environment mismatch, #1 expected: \def\badenverr{% \errhelp = \EMsimple \errmessage{This command can appear only \inenvironment\temp, not \inenvironment\thisenv}% } \def\inenvironment#1{% \ifx#1\empty outside of any environment% \else in environment \expandafter\string#1% \fi } % @end foo calls \checkenv and executes the definition of \Efoo. \parseargdef\end{ \if 1\csname iscond.#1\endcsname \else % The general wording of \badenverr may not be ideal. \expandafter\checkenv\csname#1\endcsname \csname E#1\endcsname \endgroup \fi } \newhelp\EMsimple{Press RETURN to continue.} % Be sure we're in horizontal mode when doing a tie, since we make space % equivalent to this in @example-like environments. Otherwise, a space % at the beginning of a line will start with \penalty -- and % since \penalty is valid in vertical mode, we'd end up putting the % penalty on the vertical list instead of in the new paragraph. {\catcode`@ = 11 % Avoid using \@M directly, because that causes trouble % if the definition is written into an index file. \global\let\tiepenalty = \@M \gdef\tie{\leavevmode\penalty\tiepenalty\ } } % @: forces normal size whitespace following. \def\:{\spacefactor=1000 } % @* forces a line break. \def\*{\unskip\hfil\break\hbox{}\ignorespaces} % @/ allows a line break. \let\/=\allowbreak % @. is an end-of-sentence period. 
\def\.{.\spacefactor=\endofsentencespacefactor\space} % @! is an end-of-sentence bang. \def\!{!\spacefactor=\endofsentencespacefactor\space} % @? is an end-of-sentence query. \def\?{?\spacefactor=\endofsentencespacefactor\space} % @frenchspacing on|off says whether to put extra space after punctuation. % \def\onword{on} \def\offword{off} % \parseargdef\frenchspacing{% \def\temp{#1}% \ifx\temp\onword \plainfrenchspacing \else\ifx\temp\offword \plainnonfrenchspacing \else \errhelp = \EMsimple \errmessage{Unknown @frenchspacing option `\temp', must be on|off}% \fi\fi } % @w prevents a word break. Without the \leavevmode, @w at the % beginning of a paragraph, when TeX is still in vertical mode, would % produce a whole line of output instead of starting the paragraph. \def\w#1{\leavevmode\hbox{#1}} % @group ... @end group forces ... to be all on one page, by enclosing % it in a TeX vbox. We use \vtop instead of \vbox to construct the box % to keep its height that of a normal line. According to the rules for % \topskip (p.114 of the TeXbook), the glue inserted is % max (\topskip - \ht (first item), 0). If that height is large, % therefore, no glue is inserted, and the space between the headline and % the text is small, which looks bad. % % Another complication is that the group might be very large. This can % cause the glue on the previous page to be unduly stretched, because it % does not have much material. In this case, it's better to add an % explicit \vfill so that the extra space is at the bottom. The % threshold for doing this is if the group is more than \vfilllimit % percent of a page (\vfilllimit can be changed inside of @tex). 
% \newbox\groupbox \def\vfilllimit{0.7} % \envdef\group{% \ifnum\catcode`\^^M=\active \else \errhelp = \groupinvalidhelp \errmessage{@group invalid in context where filling is enabled}% \fi \startsavinginserts % \setbox\groupbox = \vtop\bgroup % Do @comment since we are called inside an environment such as % @example, where each end-of-line in the input causes an % end-of-line in the output. We don't want the end-of-line after % the `@group' to put extra space in the output. Since @group % should appear on a line by itself (according to the Texinfo % manual), we don't worry about eating any user text. \comment } % % The \vtop produces a box with normal height and large depth; thus, TeX puts % \baselineskip glue before it, and (when the next line of text is done) % \lineskip glue after it. Thus, space below is not quite equal to space % above. But it's pretty close. \def\Egroup{% % To get correct interline space between the last line of the group % and the first line afterwards, we have to propagate \prevdepth. \endgraf % Not \par, as it may have been set to \lisppar. \global\dimen1 = \prevdepth \egroup % End the \vtop. \addgroupbox \prevdepth = \dimen1 \checkinserts } \def\addgroupbox{ % \dimen0 is the vertical size of the group's box. \dimen0 = \ht\groupbox \advance\dimen0 by \dp\groupbox % \dimen2 is how much space is left on the page (more or less). \dimen2 = \txipageheight \advance\dimen2 by -\pagetotal % if the group doesn't fit on the current page, and it's a big big % group, force a page break. \ifdim \dimen0 > \dimen2 \ifdim \pagetotal < \vfilllimit\txipageheight \page \fi \fi \box\groupbox } % % TeX puts in an \escapechar (i.e., `@') at the beginning of the help % message, so this ends up printing `@group can only ...'. % \newhelp\groupinvalidhelp{% group can only be used in environments such as @example,^^J% where each line of input produces a line of output.} % @need space-in-mils % forces a page break if there is not space-in-mils remaining. 
\newdimen\mil  \mil=0.001in

\parseargdef\need{%
  % Ensure vertical mode, so we don't make a big box in the middle of a
  % paragraph.
  \par
  %
  % If the @need value is less than one line space, it's useless.
  \dimen0 = #1\mil
  \dimen2 = \ht\strutbox
  \advance\dimen2 by \dp\strutbox
  \ifdim\dimen0 > \dimen2
    %
    % Do a \strut just to make the height of this box be normal, so the
    % normal leading is inserted relative to the preceding line.
    % And a page break here is fine.
    \vtop to #1\mil{\strut\vfil}%
    %
    % TeX does not even consider page breaks if a penalty added to the
    % main vertical list is 10000 or more.  But in order to see if the
    % empty box we just added fits on the page, we must make it consider
    % page breaks.  On the other hand, we don't want to actually break the
    % page after the empty box.  So we use a penalty of 9999.
    %
    % There is an extremely small chance that TeX will actually break the
    % page at this \penalty, if there are no other feasible breakpoints in
    % sight.  (If the user is using lots of big @group commands, which
    % almost-but-not-quite fill up a page, TeX will have a hard time doing
    % good page breaking, for example.)  However, I could not construct an
    % example where a page broke at this \penalty; if it happens in a real
    % document, then we can reconsider our strategy.
    \penalty9999
    %
    % Back up by the size of the box, whether we did a page break or not.
    \kern -#1\mil
    %
    % Do not allow a page break right after this kern.
    \nobreak
  \fi
}

% @br forces paragraph break (and is undocumented).
\let\br = \par

% @page forces the start of a new page.
%
\def\page{\par\vfill\supereject}

% @exdent text....
% outputs text on separate line in roman font, starting at standard page margin

% This records the amount of indent in the innermost environment.
% That's how much \exdent should take out.
\newskip\exdentamount

% This defn is used inside fill environments such as @defun.
\parseargdef\exdent{\hfil\break\hbox{\kern -\exdentamount{\rm#1}}\hfil\break}

% This defn is used inside nofill environments such as @example.
\parseargdef\nofillexdent{{\advance \leftskip by -\exdentamount
  \leftline{\hskip\leftskip{\rm#1}}}}

% @inmargin{WHICH}{TEXT} puts TEXT in the WHICH margin next to the current
% paragraph.  For more general purposes, use the \margin insertion
% class.  WHICH is `l' or `r'.  Not documented, written for gawk manual.
%
\newskip\inmarginspacing \inmarginspacing=1cm
\def\strutdepth{\dp\strutbox}
%
\def\doinmargin#1#2{\strut\vadjust{%
  \nobreak
  \kern-\strutdepth
  \vtop to \strutdepth{%
    \baselineskip=\strutdepth
    \vss
    % if you have multiple lines of stuff to put here, you'll need to
    % make the vbox yourself of the appropriate size.
    \ifx#1l%
      \llap{\ignorespaces #2\hskip\inmarginspacing}%
    \else
      \rlap{\hskip\hsize \hskip\inmarginspacing \ignorespaces #2}%
    \fi
    \null
  }%
}}
\def\inleftmargin{\doinmargin l}
\def\inrightmargin{\doinmargin r}
%
% @inmargin{TEXT [, RIGHT-TEXT]}
% (if RIGHT-TEXT is given, use TEXT for left page, RIGHT-TEXT for right;
% else use TEXT for both).
%
\def\inmargin#1{\parseinmargin #1,,\finish}
\def\parseinmargin#1,#2,#3\finish{% not perfect, but better than nothing.
  \setbox0 = \hbox{\ignorespaces #2}%
  \ifdim\wd0 > 0pt
    \def\lefttext{#1}% have both texts
    \def\righttext{#2}%
  \else
    \def\lefttext{#1}% have only one text
    \def\righttext{#1}%
  \fi
  %
  \ifodd\pageno
    \def\temp{\inrightmargin\righttext}% odd page -> outside is right margin
  \else
    \def\temp{\inleftmargin\lefttext}%
  \fi
  \temp
}

% @include FILE -- \input text of FILE.
%
\def\include{\parseargusing\filenamecatcodes\includezzz}
\def\includezzz#1{%
  \pushthisfilestack
  \def\thisfile{#1}%
  {%
    \makevalueexpandable % we want to expand any @value in FILE.
    \turnoffactive % and allow special characters in the expansion
    \indexnofonts % Allow `@@' and other weird things in file names.
    \wlog{texinfo.tex: doing @include of #1^^J}%
    \edef\temp{\noexpand\input #1 }%
    %
    % This trickery is to read FILE outside of a group, in case it makes
    % definitions, etc.
    \expandafter
  }\temp
  \popthisfilestack
}
\def\filenamecatcodes{%
  \catcode`\\=\other
  \catcode`~=\other
  \catcode`^=\other
  \catcode`_=\other
  \catcode`|=\other
  \catcode`<=\other
  \catcode`>=\other
  \catcode`+=\other
  \catcode`-=\other
  \catcode`\`=\other
  \catcode`\'=\other
}

\def\pushthisfilestack{%
  \expandafter\pushthisfilestackX\popthisfilestack\StackTerm
}
\def\pushthisfilestackX{%
  \expandafter\pushthisfilestackY\thisfile\StackTerm
}
\def\pushthisfilestackY #1\StackTerm #2\StackTerm {%
  \gdef\popthisfilestack{\gdef\thisfile{#1}\gdef\popthisfilestack{#2}}%
}

\def\popthisfilestack{\errthisfilestackempty}
\def\errthisfilestackempty{\errmessage{Internal error:
  the stack of filenames is empty.}}
%
\def\thisfile{}

% @center line
% outputs that line, centered.
%
\parseargdef\center{%
  \ifhmode
    \let\centersub\centerH
  \else
    \let\centersub\centerV
  \fi
  \centersub{\hfil \ignorespaces#1\unskip \hfil}%
  \let\centersub\relax % don't let the definition persist, just in case
}
\def\centerH#1{{%
  \hfil\break
  \advance\hsize by -\leftskip
  \advance\hsize by -\rightskip
  \line{#1}%
  \break
}}
%
\newcount\centerpenalty
\def\centerV#1{%
  % The idea here is the same as in \startdefun, \cartouche, etc.: if
  % @center is the first thing after a section heading, we need to wipe
  % out the negative parskip inserted by \sectionheading, but still
  % prevent a page break here.
  \centerpenalty = \lastpenalty
  \ifnum\centerpenalty>10000 \vskip\parskip \fi
  \ifnum\centerpenalty>9999 \penalty\centerpenalty \fi
  \line{\kern\leftskip #1\kern\rightskip}%
}

% @sp n   outputs n lines of vertical space
%
\parseargdef\sp{\vskip #1\baselineskip}

% @comment ...line which is ignored...
% @c is the same as @comment
% @ignore ...
% @end ignore  is another way to write a comment

% @c reads to end of line: ^^M is made active so the argument-grabbing
% \cxxx (defined below under the same active-^^M catcode) can delimit
% on it; @, {, } are made \other so they pass through the comment text.
\def\c{\begingroup \catcode`\^^M=\active%
\catcode`\@=\other \catcode`\{=\other \catcode`\}=\other%
\cxxx}
{\catcode`\^^M=\active \gdef\cxxx#1^^M{\endgroup}}
%
\let\comment\c

% @paragraphindent NCHARS
% We'll use ems for NCHARS, close enough.
% NCHARS can also be the word `asis' or `none'.
% We cannot feasibly implement @paragraphindent asis, though.
%
\def\asisword{asis} % no translation, these are keywords
\def\noneword{none}
%
\parseargdef\paragraphindent{%
  \def\temp{#1}%
  \ifx\temp\asisword
  \else
    \ifx\temp\noneword
      \defaultparindent = 0pt
    \else
      \defaultparindent = #1em
    \fi
  \fi
  \parindent = \defaultparindent
}

% @exampleindent NCHARS
% We'll use ems for NCHARS like @paragraphindent.
% It seems @exampleindent asis isn't necessary, but
% I preserve it to make it similar to @paragraphindent.
\parseargdef\exampleindent{%
  \def\temp{#1}%
  \ifx\temp\asisword
  \else
    \ifx\temp\noneword
      \lispnarrowing = 0pt
    \else
      \lispnarrowing = #1em
    \fi
  \fi
}

% @firstparagraphindent WORD
% If WORD is `none', then suppress indentation of the first paragraph
% after a section heading.  If WORD is `insert', then do indent at such
% paragraphs.
%
% The paragraph indentation is suppressed or not by calling
% \suppressfirstparagraphindent, which the sectioning commands do.
% We switch the definition of this back and forth according to WORD.
% By default, we suppress indentation.
%
\def\suppressfirstparagraphindent{\dosuppressfirstparagraphindent}
\def\insertword{insert}
%
\parseargdef\firstparagraphindent{%
  \def\temp{#1}%
  \ifx\temp\noneword
    \let\suppressfirstparagraphindent = \dosuppressfirstparagraphindent
  \else\ifx\temp\insertword
    \let\suppressfirstparagraphindent = \relax
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @firstparagraphindent option `\temp'}%
  \fi\fi
}

% Here is how we actually suppress indentation.  Redefine \everypar to
% \kern backwards by \parindent, and then reset itself to empty.
%
% We also make \indent itself not actually do anything until the next
% paragraph.
%
\gdef\dosuppressfirstparagraphindent{%
  \gdef\indent  {\restorefirstparagraphindent \indent}%
  \gdef\noindent{\restorefirstparagraphindent \noindent}%
  \global\everypar = {\kern -\parindent \restorefirstparagraphindent}%
}
%
\gdef\restorefirstparagraphindent{%
  \global\let\indent = \ptexindent
  \global\let\noindent = \ptexnoindent
  \global\everypar = {}%
}

% @refill is a no-op.
\let\refill=\relax

% @setfilename INFO-FILENAME - ignored
\let\setfilename=\comment

% @bye: flush the last page, write the PDF page-label catalog entry
% (\pagelabels, defined below), log statistics, and end the run.
\outer\def\bye{\chappager\pagelabels\tracingstats=1\ptexend}


\message{pdf,}
% adobe `portable' document format
\newcount\tempnum
\newcount\lnkcount
\newtoks\filename
\newcount\filenamelength
\newcount\pgn
\newtoks\toksA
\newtoks\toksB
\newtoks\toksC
\newtoks\toksD
\newbox\boxA
\newbox\boxB
\newcount\countA
\newif\ifpdf
\newif\ifpdfmakepagedest

%
% For LuaTeX
%

\newif\iftxiuseunicodedestname
\txiuseunicodedestnamefalse % For pdfTeX etc.

\ifx\luatexversion\thisisundefined
\else
  % Use Unicode destination names
  \txiuseunicodedestnametrue
  % Escape PDF strings with converting UTF-16 from UTF-8
  \begingroup
    \catcode`\%=12
    \directlua{
      function UTF16oct(str)
        tex.sprint(string.char(0x5c) .. '376' .. string.char(0x5c) .. '377')
        for c in string.utfvalues(str) do
          if c < 0x10000 then
            tex.sprint(
              string.format(string.char(0x5c) .. string.char(0x25) .. '03o' ..
                            string.char(0x5c) .. string.char(0x25) .. '03o',
                            math.floor(c / 256), math.floor(c % 256)))
          else
            c = c - 0x10000
            local c_hi = c / 1024 + 0xd800
            local c_lo = c % 1024 + 0xdc00
            tex.sprint(
              string.format(string.char(0x5c) .. string.char(0x25) .. '03o' ..
                            string.char(0x5c) .. string.char(0x25) .. '03o' ..
                            string.char(0x5c) .. string.char(0x25) .. '03o' ..
                            string.char(0x5c) .. string.char(0x25) .. '03o',
                            math.floor(c_hi / 256), math.floor(c_hi % 256),
                            math.floor(c_lo / 256), math.floor(c_lo % 256)))
          end
        end
      end
    }
  \endgroup
  \def\pdfescapestrutfsixteen#1{\directlua{UTF16oct('\luaescapestring{#1}')}}
  % Escape PDF strings without converting
  \begingroup
    \directlua{
      function PDFescstr(str)
        for c in string.bytes(str) do
          if c <= 0x20 or c >= 0x80 or c == 0x28 or c == 0x29 or c == 0x5c then
            tex.sprint(-2,
              string.format(string.char(0x5c) .. string.char(0x25) .. '03o',
                            c))
          else
            tex.sprint(-2, string.char(c))
          end
        end
      end
    }
    % The -2 in the arguments here gives all the input to TeX catcode 12
    % (other) or 10 (space), preventing undefined control sequence errors. See
    % https://lists.gnu.org/archive/html/bug-texinfo/2019-08/msg00031.html
    %
  \endgroup
  \def\pdfescapestring#1{\directlua{PDFescstr('\luaescapestring{#1}')}}
  \ifnum\luatexversion>84
    % For LuaTeX >= 0.85, map the removed pdfTeX primitives onto the
    % \pdfextension / \pdffeedback / \pdfvariable interface.
    \def\pdfdest{\pdfextension dest}
    \let\pdfoutput\outputmode
    \def\pdfliteral{\pdfextension literal}
    \def\pdfcatalog{\pdfextension catalog}
    \def\pdftexversion{\numexpr\pdffeedback version\relax}
    \let\pdfximage\saveimageresource
    \let\pdfrefximage\useimageresource
    \let\pdflastximage\lastsavedimageresourceindex
    \def\pdfendlink{\pdfextension endlink\relax}
    \def\pdfoutline{\pdfextension outline}
    \def\pdfstartlink{\pdfextension startlink}
    \def\pdffontattr{\pdfextension fontattr}
    \def\pdfobj{\pdfextension obj}
    \def\pdflastobj{\numexpr\pdffeedback lastobj\relax}
    \let\pdfpagewidth\pagewidth
    \let\pdfpageheight\pageheight
    \edef\pdfhorigin{\pdfvariable horigin}
    \edef\pdfvorigin{\pdfvariable vorigin}
  \fi
\fi

% when pdftex is run in dvi mode, \pdfoutput is defined (so \pdfoutput=1
% can be set). So we test for \relax and 0 as well as being undefined.
\ifx\pdfoutput\thisisundefined
\else
  \ifx\pdfoutput\relax
  \else
    \ifcase\pdfoutput
    \else
      \pdftrue
    \fi
  \fi
\fi

\newif\ifpdforxetex
\pdforxetexfalse
\ifpdf
  \pdforxetextrue
\fi
\ifx\XeTeXrevision\thisisundefined\else
  \pdforxetextrue
\fi

% Output page labels information.
% See PDF reference v.1.7 p.594, section 8.3.1.
\ifpdf
  \def\pagelabels{%
    \def\title{0 << /P (T-) /S /D >>}%
    \edef\roman{\the\romancount << /S /r >>}%
    \edef\arabic{\the\arabiccount << /S /D >>}%
    %
    % Page label ranges must be increasing.  Remove any duplicates.
    % (There is a slight chance of this being wrong if e.g. there is
    % a @contents but no @titlepage, etc.)
    %
    \ifnum\romancount=0 \def\roman{}\fi
    \ifnum\arabiccount=0 \def\title{}%
    \else
      \ifnum\romancount=\arabiccount \def\roman{}\fi
    \fi
    %
    \ifnum\romancount<\arabiccount
      \pdfcatalog{/PageLabels << /Nums [\title \roman \arabic ] >> }\relax
    \else
      \pdfcatalog{/PageLabels << /Nums [\title \arabic \roman ] >> }\relax
    \fi
  }
\else
  \let\pagelabels\relax
\fi

\newcount\pagecount \pagecount=0
\newcount\romancount \romancount=0
\newcount\arabiccount \arabiccount=0
\ifpdf
  % Wrap plain TeX's \advancepageno so we also count physical pages
  % for the /PageLabels ranges above.
  \let\ptxadvancepageno\advancepageno
  \def\advancepageno{%
    \ptxadvancepageno\global\advance\pagecount by 1
  }
\fi

% PDF uses PostScript string constants for the names of xref targets,
% for display in the outlines, and in other places.  Thus, we have to
% double any backslashes.  Otherwise, a name like "\node" will be
% interpreted as a newline (\n), followed by o, d, e.  Not good.
%
% See http://www.ntg.nl/pipermail/ntg-pdftex/2004-July/000654.html and
% related messages.  The final outcome is that it is up to the TeX user
% to double the backslashes and otherwise make the string valid, so
% that's what we do.  pdftex 1.30.0 (ca.2005) introduced a primitive to
% do this reliably, so we use it.

% #1 is a control sequence in which to do the replacements,
% which we \xdef.
\def\txiescapepdf#1{%
  \ifx\pdfescapestring\thisisundefined
    % No primitive available; should we give a warning or log?
    % Many times it won't matter.
    \xdef#1{#1}%
  \else
    % The expandable \pdfescapestring primitive escapes parentheses,
    % backslashes, and other special chars.
    \xdef#1{\pdfescapestring{#1}}%
  \fi
}
\def\txiescapepdfutfsixteen#1{%
  \ifx\pdfescapestrutfsixteen\thisisundefined
    % No UTF-16 converting macro available.
    \txiescapepdf{#1}%
  \else
    \xdef#1{\pdfescapestrutfsixteen{#1}}%
  \fi
}

\newhelp\nopdfimagehelp{Texinfo supports .png, .jpg, .jpeg, and .pdf images
with PDF output, and none of those formats could be found.  (.eps cannot
be supported due to the design of the PDF format; use regular TeX (DVI
output) for that.)}

\ifpdf
  %
  % Color manipulation macros using ideas from pdfcolor.tex,
  % except using rgb instead of cmyk; the latter is said to render as a
  % very dark gray on-screen and a very dark halftone in print, instead
  % of actual black. The dark red here is dark enough to print on paper as
  % nearly black, but still distinguishable for online viewing.  We use
  % black by default, though.
  \def\rgbDarkRed{0.50 0.09 0.12}
  \def\rgbBlack{0 0 0}
  %
  % rg sets the color for filling (usual text, etc.);
  % RG sets the color for stroking (thin rules, e.g., normal _'s).
  \def\pdfsetcolor#1{\pdfliteral{#1 rg #1 RG}}
  %
  % Set color, and create a mark which defines \thiscolor accordingly,
  % so that \makeheadline knows which color to restore.
  \def\setcolor#1{%
    \xdef\currentcolordefs{\gdef\noexpand\thiscolor{#1}}%
    \domark
    \pdfsetcolor{#1}%
  }
  %
  \def\maincolor{\rgbBlack}
  \pdfsetcolor{\maincolor}
  \edef\thiscolor{\maincolor}
  \def\currentcolordefs{}
  %
  \def\makefootline{%
    \baselineskip24pt
    \line{\pdfsetcolor{\maincolor}\the\footline}%
  }
  %
  \def\makeheadline{%
    \vbox to 0pt{%
      \vskip-22.5pt
      \line{%
        \vbox to8.5pt{}%
        % Extract \thiscolor definition from the marks.
        \getcolormarks
        % Typeset the headline with \maincolor, then restore the color.
        \pdfsetcolor{\maincolor}\the\headline\pdfsetcolor{\thiscolor}%
      }%
      \vss
    }%
    \nointerlineskip
  }
  %
  %
  \pdfcatalog{/PageMode /UseOutlines}
  %
  % #1 is image name, #2 width (might be empty/whitespace), #3 height (ditto).
  \def\dopdfimage#1#2#3{%
    \def\pdfimagewidth{#2}\setbox0 = \hbox{\ignorespaces #2}%
    \def\pdfimageheight{#3}\setbox2 = \hbox{\ignorespaces #3}%
    %
    % pdftex (and the PDF format) support .pdf, .png, .jpg (among
    % others).  Let's try in that order, PDF first since if
    % someone has a scalable image, presumably better to use that than a
    % bitmap.
    \let\pdfimgext=\empty
    \begingroup
      \openin 1 #1.pdf \ifeof 1
        \openin 1 #1.PDF \ifeof 1
          \openin 1 #1.png \ifeof 1
            \openin 1 #1.jpg \ifeof 1
              \openin 1 #1.jpeg \ifeof 1
                \openin 1 #1.JPG \ifeof 1
                  \errhelp = \nopdfimagehelp
                  \errmessage{Could not find image file #1 for pdf}%
                \else \gdef\pdfimgext{JPG}%
                \fi
              \else \gdef\pdfimgext{jpeg}%
              \fi
            \else \gdef\pdfimgext{jpg}%
            \fi
          \else \gdef\pdfimgext{png}%
          \fi
        \else \gdef\pdfimgext{PDF}%
        \fi
      \else \gdef\pdfimgext{pdf}%
      \fi
      \closein 1
    \endgroup
    %
    % without \immediate, ancient pdftex seg faults when the same image is
    % included twice.  (Version 3.14159-pre-1.0-unofficial-20010704.)
    \ifnum\pdftexversion < 14
      \immediate\pdfimage
    \else
      \immediate\pdfximage
    \fi
      % width/height are only emitted when the argument produced
      % something visible (box width > 0).
      \ifdim \wd0 >0pt width \pdfimagewidth \fi
      \ifdim \wd2 >0pt height \pdfimageheight \fi
      \ifnum\pdftexversion<13
        #1.\pdfimgext
      \else
        {#1.\pdfimgext}%
      \fi
    \ifnum\pdftexversion < 14 \else
      \pdfrefximage \pdflastximage
    \fi}
  %
  \def\setpdfdestname#1{{%
    % We have to set dummies so commands such as @code, and characters
    % such as \, aren't expanded when present in a section title.
    \indexnofonts
    \makevalueexpandable
    \turnoffactive
    \iftxiuseunicodedestname
      \ifx \declaredencoding \latone
        % Pass through Latin-1 characters.
        % LuaTeX with byte wise I/O converts Latin-1 characters to Unicode.
      \else
        \ifx \declaredencoding \utfeight
          % Pass through Unicode characters.
        \else
          % Use ASCII approximations in destination names.
          \passthroughcharsfalse
        \fi
      \fi
    \else
      % Use ASCII approximations in destination names.
      \passthroughcharsfalse
    \fi
    \def\pdfdestname{#1}%
    \txiescapepdf\pdfdestname
  }}
  %
  \def\setpdfoutlinetext#1{{%
    \indexnofonts
    \makevalueexpandable
    \turnoffactive
    \ifx \declaredencoding \latone
      % The PDF format can use an extended form of Latin-1 in bookmark
      % strings.  See Appendix D of the PDF Reference, Sixth Edition, for
      % the "PDFDocEncoding".
      \passthroughcharstrue
      % Pass through Latin-1 characters.
      %   LuaTeX: Convert to Unicode
      %   pdfTeX: Use Latin-1 as PDFDocEncoding
      \def\pdfoutlinetext{#1}%
    \else
      \ifx \declaredencoding \utfeight
        \ifx\luatexversion\thisisundefined
          % For pdfTeX with UTF-8.
          % TODO: the PDF format can use UTF-16 in bookmark strings,
          % but the code for this isn't done yet.
          % Use ASCII approximations.
          \passthroughcharsfalse
          \def\pdfoutlinetext{#1}%
        \else
          % For LuaTeX with UTF-8.
          % Pass through Unicode characters for title texts.
          \passthroughcharstrue
          \def\pdfoutlinetext{#1}%
        \fi
      \else
        % For non-Latin-1 or non-UTF-8 encodings.
        % Use ASCII approximations.
        \passthroughcharsfalse
        \def\pdfoutlinetext{#1}%
      \fi
    \fi
    % LuaTeX: Convert to UTF-16
    % pdfTeX: Use Latin-1 as PDFDocEncoding
    \txiescapepdfutfsixteen\pdfoutlinetext
  }}
  %
  \def\pdfmkdest#1{%
    \setpdfdestname{#1}%
    \safewhatsit{\pdfdest name{\pdfdestname} xyz}%
  }
  %
  % used to mark target names; must be expandable.
  \def\pdfmkpgn#1{#1}
  %
  % by default, use black for everything.
  \def\urlcolor{\rgbBlack}
  \def\linkcolor{\rgbBlack}
  \def\endlink{\setcolor{\maincolor}\pdfendlink}
  %
  % Adding outlines to PDF; macros for calculating structure of outlines
  % come from Petr Olsak
  \def\expnumber#1{\expandafter\ifx\csname#1\endcsname\relax 0%
    \else \csname#1\endcsname \fi}
  \def\advancenumber#1{\tempnum=\expnumber{#1}\relax
    \advance\tempnum by 1
    \expandafter\xdef\csname#1\endcsname{\the\tempnum}}
  %
  % #1 is the section text, which is what will be displayed in the
  % outline by the pdf viewer.  #2 is the pdf expression for the number
  % of subentries (or empty, for subsubsections).
  % #3 is the node text, which might be empty if this toc entry had no
  % corresponding node.  #4 is the page number
  %
  \def\dopdfoutline#1#2#3#4{%
    % Generate a link to the node text if that exists; else, use the
    % page number.  We could generate a destination for the section
    % text in the case where a section has no node, but it doesn't
    % seem worth the trouble, since most documents are normally structured.
    \setpdfoutlinetext{#1}
    \setpdfdestname{#3}
    \ifx\pdfdestname\empty
      \def\pdfdestname{#4}%
    \fi
    %
    \pdfoutline goto name{\pdfmkpgn{\pdfdestname}}#2{\pdfoutlinetext}%
  }
  %
  \def\pdfmakeoutlines{%
    \begingroup
      % Read toc silently, to get counts of subentries for \pdfoutline.
      \def\partentry##1##2##3##4{}% ignore parts in the outlines
      \def\numchapentry##1##2##3##4{%
        \def\thischapnum{##2}%
        \def\thissecnum{0}%
        \def\thissubsecnum{0}%
      }%
      \def\numsecentry##1##2##3##4{%
        \advancenumber{chap\thischapnum}%
        \def\thissecnum{##2}%
        \def\thissubsecnum{0}%
      }%
      \def\numsubsecentry##1##2##3##4{%
        \advancenumber{sec\thissecnum}%
        \def\thissubsecnum{##2}%
      }%
      \def\numsubsubsecentry##1##2##3##4{%
        \advancenumber{subsec\thissubsecnum}%
      }%
      \def\thischapnum{0}%
      \def\thissecnum{0}%
      \def\thissubsecnum{0}%
      %
      % use \def rather than \let here because we redefine \chapentry et
      % al. a second time, below.
      \def\appentry{\numchapentry}%
      \def\appsecentry{\numsecentry}%
      \def\appsubsecentry{\numsubsecentry}%
      \def\appsubsubsecentry{\numsubsubsecentry}%
      \def\unnchapentry{\numchapentry}%
      \def\unnsecentry{\numsecentry}%
      \def\unnsubsecentry{\numsubsecentry}%
      \def\unnsubsubsecentry{\numsubsubsecentry}%
      \readdatafile{toc}%
      %
      % Read toc second time, this time actually producing the outlines.
      % The `-' means take the \expnumber as the absolute number of
      % subentries, which we calculated on our first read of the .toc above.
      %
      % We use the node names as the destinations.
      %
      % Currently we prefix the section name with the section number
      % for chapter and appendix headings only in order to avoid too much
      % horizontal space being required in the PDF viewer.
      \def\numchapentry##1##2##3##4{%
        \dopdfoutline{##2 ##1}{count-\expnumber{chap##2}}{##3}{##4}}%
      \def\unnchapentry##1##2##3##4{%
        \dopdfoutline{##1}{count-\expnumber{chap##2}}{##3}{##4}}%
      \def\numsecentry##1##2##3##4{%
        \dopdfoutline{##1}{count-\expnumber{sec##2}}{##3}{##4}}%
      \def\numsubsecentry##1##2##3##4{%
        \dopdfoutline{##1}{count-\expnumber{subsec##2}}{##3}{##4}}%
      \def\numsubsubsecentry##1##2##3##4{% count is always zero
        \dopdfoutline{##1}{}{##3}{##4}}%
      %
      % PDF outlines are displayed using system fonts, instead of
      % document fonts.  Therefore we cannot use special characters,
      % since the encoding is unknown.  For example, the eogonek from
      % Latin 2 (0xea) gets translated to a | character.  Info from
      % Staszek Wawrykiewicz, 19 Jan 2004 04:09:24 +0100.
      %
      % To do this right, we have to translate 8-bit characters to
      % their "best" equivalent, based on the @documentencoding.  Too
      % much work for too little return.  Just use the ASCII equivalents
      % we use for the index sort strings.
      %
      \indexnofonts
      \setupdatafile
      % We can have normal brace characters in the PDF outlines, unlike
      % Texinfo index files.  So set that up.
      \def\{{\lbracecharliteral}%
      \def\}{\rbracecharliteral}%
      \catcode`\\=\active \otherbackslash
      \input \tocreadfilename
    \endgroup
  }
  {\catcode`[=1 \catcode`]=2
   \catcode`{=\other \catcode`}=\other
   \gdef\lbracecharliteral[{]%
   \gdef\rbracecharliteral[}]%
  ]
  %
  \def\skipspaces#1{\def\PP{#1}\def\D{|}%
    \ifx\PP\D\let\nextsp\relax
    \else\let\nextsp\skipspaces
      \addtokens{\filename}{\PP}%
      \advance\filenamelength by 1
    \fi
    \nextsp}
  \def\getfilename#1{%
    \filenamelength=0
    % If we don't expand the argument now, \skipspaces will get
    % snagged on things like "@value{foo}".
    \edef\temp{#1}%
    \expandafter\skipspaces\temp|\relax
  }
  \ifnum\pdftexversion < 14
    \let \startlink \pdfannotlink
  \else
    \let \startlink \pdfstartlink
  \fi
  % make a live url in pdf output.
  \def\pdfurl#1{%
    \begingroup
      % it seems we really need yet another set of dummies; have not
      % tried to figure out what each command should do in the context
      % of @url.  for now, just make @/ a no-op, that's the only one
      % people have actually reported a problem with.
      %
      \normalturnoffactive
      \def\@{@}%
      \let\/=\empty
      \makevalueexpandable
      % do we want to go so far as to use \indexnofonts instead of just
      % special-casing \var here?
      \def\var##1{##1}%
      %
      \leavevmode\setcolor{\urlcolor}%
      \startlink attr{/Border [0 0 0]}%
        user{/Subtype /Link /A << /S /URI /URI (#1) >>}%
    \endgroup}
  % \pdfgettoks - Surround page numbers in #1 with @pdflink.  #1 may
  % be a simple number, or a list of numbers in the case of an index
  % entry.
  \def\pdfgettoks#1.{\setbox\boxA=\hbox{\toksA={#1.}\toksB={}\maketoks}}
  \def\addtokens#1#2{\edef\addtoks{\noexpand#1={\the#1#2}}\addtoks}
  \def\adn#1{\addtokens{\toksC}{#1}\global\countA=1\let\next=\maketoks}
  \def\poptoks#1#2|ENDTOKS|{\let\first=#1\toksD={#1}\toksA={#2}}
  \def\maketoks{%
    \expandafter\poptoks\the\toksA|ENDTOKS|\relax
    \ifx\first0\adn0
    \else\ifx\first1\adn1
    \else\ifx\first2\adn2
    \else\ifx\first3\adn3
    \else\ifx\first4\adn4
    \else\ifx\first5\adn5
    \else\ifx\first6\adn6
    \else\ifx\first7\adn7
    \else\ifx\first8\adn8
    \else\ifx\first9\adn9
    \else
      \ifnum0=\countA\else\makelink\fi
      \ifx\first.\let\next=\done\else
        \let\next=\maketoks
        \addtokens{\toksB}{\the\toksD}
        \ifx\first,\addtokens{\toksB}{\space}\fi
      \fi
    \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
    \next}
  \def\makelink{\addtokens{\toksB}%
    {\noexpand\pdflink{\the\toksC}}\toksC={}\global\countA=0}
  \def\pdflink#1{%
    \startlink attr{/Border [0 0 0]} goto name{\pdfmkpgn{#1}}
    \setcolor{\linkcolor}#1\endlink}
  \def\done{\edef\st{\global\noexpand\toksA={\the\toksB}}\st}
\else
  % non-pdf mode
  \let\pdfmkdest = \gobble
  \let\pdfurl = \gobble
  \let\endlink = \relax
  \let\setcolor = \gobble
  \let\pdfsetcolor = \gobble
  \let\pdfmakeoutlines = \relax
\fi % \ifx\pdfoutput

%
% For XeTeX
%
\ifx\XeTeXrevision\thisisundefined
\else
  %
  % XeTeX version check
  %
  \ifnum\strcmp{\the\XeTeXversion\XeTeXrevision}{0.99996}>-1
    % TeX Live 2016 contains XeTeX 0.99996 and xdvipdfmx 20160307.
    % It can use the `dvipdfmx:config' special (from TeX Live SVN r40941).
    % For avoiding PDF destination name replacement, we use this special
    % instead of xdvipdfmx's command line option `-C 0x0010'.
    \special{dvipdfmx:config C 0x0010}
    % XeTeX 0.99995+ comes with xdvipdfmx 20160307+.
    % It can handle Unicode destination names for PDF.
    \txiuseunicodedestnametrue
  \else
    % XeTeX < 0.99996 (TeX Live < 2016) cannot use the
    % `dvipdfmx:config' special.
    % So for avoiding PDF destination name replacement,
    % xdvipdfmx's command line option `-C 0x0010' is necessary.
    %
    % XeTeX < 0.99995 can not handle Unicode destination names for PDF
    % because xdvipdfmx 20150315 has a UTF-16 conversion issue.
    % It is fixed by xdvipdfmx 20160106 (TeX Live SVN r39753).
    \txiuseunicodedestnamefalse
  \fi
  %
  % Color support
  %
  \def\rgbDarkRed{0.50 0.09 0.12}
  \def\rgbBlack{0 0 0}
  %
  \def\pdfsetcolor#1{\special{pdf:scolor [#1]}}
  %
  % Set color, and create a mark which defines \thiscolor accordingly,
  % so that \makeheadline knows which color to restore.
  \def\setcolor#1{%
    \xdef\currentcolordefs{\gdef\noexpand\thiscolor{#1}}%
    \domark
    \pdfsetcolor{#1}%
  }
  %
  \def\maincolor{\rgbBlack}
  \pdfsetcolor{\maincolor}
  \edef\thiscolor{\maincolor}
  \def\currentcolordefs{}
  %
  \def\makefootline{%
    \baselineskip24pt
    \line{\pdfsetcolor{\maincolor}\the\footline}%
  }
  %
  \def\makeheadline{%
    \vbox to 0pt{%
      \vskip-22.5pt
      \line{%
        \vbox to8.5pt{}%
        % Extract \thiscolor definition from the marks.
        \getcolormarks
        % Typeset the headline with \maincolor, then restore the color.
        \pdfsetcolor{\maincolor}\the\headline\pdfsetcolor{\thiscolor}%
      }%
      \vss
    }%
    \nointerlineskip
  }
  %
  % PDF outline support
  %
  % Emulate pdfTeX primitive
  \def\pdfdest name#1 xyz{%
    \special{pdf:dest (#1) [@thispage /XYZ @xpos @ypos null]}%
  }
  %
  \def\setpdfdestname#1{{%
    % We have to set dummies so commands such as @code, and characters
    % such as \, aren't expanded when present in a section title.
    \indexnofonts
    \makevalueexpandable
    \turnoffactive
    \iftxiuseunicodedestname
      % Pass through Unicode characters.
    \else
      % Use ASCII approximations in destination names.
      \passthroughcharsfalse
    \fi
    \def\pdfdestname{#1}%
    \txiescapepdf\pdfdestname
  }}
  %
  \def\setpdfoutlinetext#1{{%
    \turnoffactive
    % Always use Unicode characters in title texts.
    \def\pdfoutlinetext{#1}%
    % For XeTeX, xdvipdfmx converts to UTF-16.
    % So we do not convert.
    \txiescapepdf\pdfoutlinetext
  }}
  %
  \def\pdfmkdest#1{%
    \setpdfdestname{#1}%
    \safewhatsit{\pdfdest name{\pdfdestname} xyz}%
  }
  %
  % by default, use black for everything.
  \def\urlcolor{\rgbBlack}
  \def\linkcolor{\rgbBlack}
  \def\endlink{\setcolor{\maincolor}\pdfendlink}
  %
  \def\dopdfoutline#1#2#3#4{%
    \setpdfoutlinetext{#1}
    \setpdfdestname{#3}
    \ifx\pdfdestname\empty
      \def\pdfdestname{#4}%
    \fi
    %
    \special{pdf:out [-] #2 << /Title (\pdfoutlinetext) /A
      << /S /GoTo /D (\pdfdestname) >> >> }%
  }
  %
  \def\pdfmakeoutlines{%
    \begingroup
      %
      % For XeTeX, counts of subentries are not necessary.
      % Therefore, we read toc only once.
      %
      % We use node names as destinations.
      %
      % Currently we prefix the section name with the section number
      % for chapter and appendix headings only in order to avoid too much
      % horizontal space being required in the PDF viewer.
      \def\partentry##1##2##3##4{}% ignore parts in the outlines
      \def\numchapentry##1##2##3##4{%
        \dopdfoutline{##2 ##1}{1}{##3}{##4}}%
      \def\numsecentry##1##2##3##4{%
        \dopdfoutline{##1}{2}{##3}{##4}}%
      \def\numsubsecentry##1##2##3##4{%
        \dopdfoutline{##1}{3}{##3}{##4}}%
      \def\numsubsubsecentry##1##2##3##4{%
        \dopdfoutline{##1}{4}{##3}{##4}}%
      %
      \let\appentry\numchapentry%
      \let\appsecentry\numsecentry%
      \let\appsubsecentry\numsubsecentry%
      \let\appsubsubsecentry\numsubsubsecentry%
      \def\unnchapentry##1##2##3##4{%
        \dopdfoutline{##1}{1}{##3}{##4}}%
      \let\unnsecentry\numsecentry%
      \let\unnsubsecentry\numsubsecentry%
      \let\unnsubsubsecentry\numsubsubsecentry%
      %
      % For XeTeX, xdvipdfmx converts strings to UTF-16.
      % Therefore, the encoding and the language may not be considered.
      %
      \indexnofonts
      \setupdatafile
      % We can have normal brace characters in the PDF outlines, unlike
      % Texinfo index files.  So set that up.
      \def\{{\lbracecharliteral}%
      \def\}{\rbracecharliteral}%
      \catcode`\\=\active \otherbackslash
      \input \tocreadfilename
    \endgroup
  }
  {\catcode`[=1 \catcode`]=2
   \catcode`{=\other \catcode`}=\other
   \gdef\lbracecharliteral[{]%
   \gdef\rbracecharliteral[}]%
  ]
  \special{pdf:docview << /PageMode /UseOutlines >> }
  % ``\special{pdf:tounicode ...}'' is not necessary
  % because xdvipdfmx converts strings from UTF-8 to UTF-16 without it.
  % However, due to a UTF-16 conversion issue of xdvipdfmx 20150315,
  % ``\special{pdf:dest ...}'' cannot handle non-ASCII strings.
  % It is fixed by xdvipdfmx 20160106 (TeX Live SVN r39753).
  %
  \def\skipspaces#1{\def\PP{#1}\def\D{|}%
    \ifx\PP\D\let\nextsp\relax
    \else\let\nextsp\skipspaces
      \addtokens{\filename}{\PP}%
      \advance\filenamelength by 1
    \fi
    \nextsp}
  \def\getfilename#1{%
    \filenamelength=0
    % If we don't expand the argument now, \skipspaces will get
    % snagged on things like "@value{foo}".
    \edef\temp{#1}%
    \expandafter\skipspaces\temp|\relax
  }
  % make a live url in pdf output.
  \def\pdfurl#1{%
    \begingroup
      % it seems we really need yet another set of dummies; have not
      % tried to figure out what each command should do in the context
      % of @url.  for now, just make @/ a no-op, that's the only one
      % people have actually reported a problem with.
      %
      \normalturnoffactive
      \def\@{@}%
      \let\/=\empty
      \makevalueexpandable
      % do we want to go so far as to use \indexnofonts instead of just
      % special-casing \var here?
      \def\var##1{##1}%
      %
      \leavevmode\setcolor{\urlcolor}%
      \special{pdf:bann << /Border [0 0 0]
        /Subtype /Link /A << /S /URI /URI (#1) >> >>}%
    \endgroup}
  \def\endlink{\setcolor{\maincolor}\special{pdf:eann}}
  \def\pdfgettoks#1.{\setbox\boxA=\hbox{\toksA={#1.}\toksB={}\maketoks}}
  \def\addtokens#1#2{\edef\addtoks{\noexpand#1={\the#1#2}}\addtoks}
  \def\adn#1{\addtokens{\toksC}{#1}\global\countA=1\let\next=\maketoks}
  \def\poptoks#1#2|ENDTOKS|{\let\first=#1\toksD={#1}\toksA={#2}}
  \def\maketoks{%
    \expandafter\poptoks\the\toksA|ENDTOKS|\relax
    \ifx\first0\adn0
    \else\ifx\first1\adn1
    \else\ifx\first2\adn2
    \else\ifx\first3\adn3
    \else\ifx\first4\adn4
    \else\ifx\first5\adn5
    \else\ifx\first6\adn6
    \else\ifx\first7\adn7
    \else\ifx\first8\adn8
    \else\ifx\first9\adn9
    \else
      \ifnum0=\countA\else\makelink\fi
      \ifx\first.\let\next=\done\else
        \let\next=\maketoks
        \addtokens{\toksB}{\the\toksD}
        \ifx\first,\addtokens{\toksB}{\space}\fi
      \fi
    \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
    \next}
  \def\makelink{\addtokens{\toksB}%
    {\noexpand\pdflink{\the\toksC}}\toksC={}\global\countA=0}
  \def\pdflink#1{%
    \special{pdf:bann << /Border [0 0 0]
      /Type /Annot /Subtype /Link /A << /S /GoTo /D (#1) >> >>}%
    \setcolor{\linkcolor}#1\endlink}
  \def\done{\edef\st{\global\noexpand\toksA={\the\toksB}}\st}
  %
  %
  % @image support
  %
  % #1 is image name, #2 width (might be empty/whitespace), #3 height (ditto).
  \def\doxeteximage#1#2#3{%
    \def\xeteximagewidth{#2}\setbox0 = \hbox{\ignorespaces #2}%
    \def\xeteximageheight{#3}\setbox2 = \hbox{\ignorespaces #3}%
    %
    % XeTeX (and the PDF format) supports .pdf, .png, .jpg (among
    % others).  Let's try in that order, PDF first since if
    % someone has a scalable image, presumably better to use that than a
    % bitmap.
    \let\xeteximgext=\empty
    \begingroup
      \openin 1 #1.pdf \ifeof 1
        \openin 1 #1.PDF \ifeof 1
          \openin 1 #1.png \ifeof 1
            \openin 1 #1.jpg \ifeof 1
              \openin 1 #1.jpeg \ifeof 1
                \openin 1 #1.JPG \ifeof 1
                  \errmessage{Could not find image file #1 for XeTeX}%
                \else \gdef\xeteximgext{JPG}%
                \fi
              \else \gdef\xeteximgext{jpeg}%
              \fi
            \else \gdef\xeteximgext{jpg}%
            \fi
          \else \gdef\xeteximgext{png}%
          \fi
        \else \gdef\xeteximgext{PDF}%
        \fi
      \else \gdef\xeteximgext{pdf}%
      \fi
      \closein 1
    \endgroup
    %
    % PDF files go through \XeTeXpdffile, everything else through
    % \XeTeXpicfile.
    \def\xetexpdfext{pdf}%
    \ifx\xeteximgext\xetexpdfext
      \XeTeXpdffile "#1".\xeteximgext ""
    \else
      \def\xetexpdfext{PDF}%
      \ifx\xeteximgext\xetexpdfext
        \XeTeXpdffile "#1".\xeteximgext ""
      \else
        \XeTeXpicfile "#1".\xeteximgext ""
      \fi
    \fi
    \ifdim \wd0 >0pt width \xeteximagewidth \fi
    \ifdim \wd2 >0pt height \xeteximageheight \fi
    \relax
  }
\fi

%
\message{fonts,}

% Set the baselineskip to #1, and the lineskip and strut size
% correspondingly.  There is no deep meaning behind these magic numbers
% used as factors; they just match (closely enough) what Knuth defined.
%
\def\lineskipfactor{.08333}
\def\strutheightpercent{.70833}
\def\strutdepthpercent {.29167}
%
% can get a sort of poor man's double spacing by redefining this.
\def\baselinefactor{1}
%
\newdimen\textleading
\def\setleading#1{%
  \dimen0 = #1\relax
  \normalbaselineskip = \baselinefactor\dimen0
  \normallineskip = \lineskipfactor\normalbaselineskip
  \normalbaselines
  \setbox\strutbox =\hbox{%
    \vrule width0pt height\strutheightpercent\baselineskip
                    depth \strutdepthpercent \baselineskip
  }%
}

% PDF CMaps.  See also LaTeX's t1.cmap.
%
% do nothing with this by default.
% \cmapOT1 / \cmapOT1IT / \cmapOT1TT attach a /ToUnicode CMap to a font so
% that text can be searched/copied from the PDF.  By default they are no-ops.
\expandafter\let\csname cmapOT1\endcsname\gobble
\expandafter\let\csname cmapOT1IT\endcsname\gobble
\expandafter\let\csname cmapOT1TT\endcsname\gobble
% if we are producing pdf, and we have \pdffontattr, then define cmaps.
% (\pdffontattr was introduced many years ago, but people still run
% older pdftex's; it's easy to conditionalize, so we do.)
\ifpdf
  \ifx\pdffontattr\thisisundefined \else
    % Each CMap is written as a PDF stream object.  Inside the group, ^^M
    % is made active so line ends become ^^J in the stream, and % is given
    % catcode 12 (other) so the PostScript %%-comments are literal data.
    \begingroup
      \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
      \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1-0)
%%Title: (TeX-OT1-0 TeX OT1 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1)
/Supplement 0
>> def
/CMapName /TeX-OT1-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
8 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<23> <26> <0023>
<28> <3B> <0028>
<3F> <5B> <003F>
<5D> <5E> <005D>
<61> <7A> <0061>
<7B> <7C> <2013>
endbfrange
40 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <00660066>
<0C> <00660069>
<0D> <0066006C>
<0E> <006600660069>
<0F> <00660066006C>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<21> <0021>
<22> <201D>
<27> <2019>
<3C> <00A1>
<3D> <003D>
<3E> <00BF>
<5C> <201C>
<5F> <02D9>
<60> <2018>
<7D> <02DD>
<7E> <007E>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
    }\endgroup
  % The \edef freezes the object number of the stream just created
  % (\pdflastobj), so \cmapOT1<font> attaches that /ToUnicode to font #1.
  \expandafter\edef\csname cmapOT1\endcsname#1{%
    \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
  }%
%
% \cmapOT1IT
  \begingroup
    \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
      \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1IT-0)
%%Title: (TeX-OT1IT-0 TeX OT1IT 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1IT)
/Supplement 0
>> def
/CMapName /TeX-OT1IT-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
8 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<25> <26> <0025>
<28> <3B> <0028>
<3F> <5B> <003F>
<5D> <5E> <005D>
<61> <7A> <0061>
<7B> <7C> <2013>
endbfrange
42 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <00660066>
<0C> <00660069>
<0D> <0066006C>
<0E> <006600660069>
<0F> <00660066006C>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<21> <0021>
<22> <201D>
<23> <0023>
<24> <00A3>
<27> <2019>
<3C> <00A1>
<3D> <003D>
<3E> <00BF>
<5C> <201C>
<5F> <02D9>
<60> <2018>
<7D> <02DD>
<7E> <007E>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
    }\endgroup
  \expandafter\edef\csname cmapOT1IT\endcsname#1{%
    \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
  }%
%
% \cmapOT1TT
  \begingroup
    \catcode`\^^M=\active \def^^M{^^J}% Output line endings as the ^^J char.
      \catcode`\%=12 \immediate\pdfobj stream {%!PS-Adobe-3.0 Resource-CMap
%%DocumentNeededResources: ProcSet (CIDInit)
%%IncludeResource: ProcSet (CIDInit)
%%BeginResource: CMap (TeX-OT1TT-0)
%%Title: (TeX-OT1TT-0 TeX OT1TT 0)
%%Version: 1.000
%%EndComments
/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (TeX)
/Ordering (OT1TT)
/Supplement 0
>> def
/CMapName /TeX-OT1TT-0 def
/CMapType 2 def
1 begincodespacerange
<00> <7F>
endcodespacerange
5 beginbfrange
<00> <01> <0393>
<09> <0A> <03A8>
<21> <26> <0021>
<28> <5F> <0028>
<61> <7E> <0061>
endbfrange
32 beginbfchar
<02> <0398>
<03> <039B>
<04> <039E>
<05> <03A0>
<06> <03A3>
<07> <03D2>
<08> <03A6>
<0B> <2191>
<0C> <2193>
<0D> <0027>
<0E> <00A1>
<0F> <00BF>
<10> <0131>
<11> <0237>
<12> <0060>
<13> <00B4>
<14> <02C7>
<15> <02D8>
<16> <00AF>
<17> <02DA>
<18> <00B8>
<19> <00DF>
<1A> <00E6>
<1B> <0153>
<1C> <00F8>
<1D> <00C6>
<1E> <0152>
<1F> <00D8>
<20> <2423>
<27> <2019>
<60> <2018>
<7F> <00A8>
endbfchar
endcmap
CMapName currentdict /CMap defineresource pop
end
end
%%EndResource
%%EOF
    }\endgroup
  \expandafter\edef\csname cmapOT1TT\endcsname#1{%
    \pdffontattr#1{/ToUnicode \the\pdflastobj\space 0 R}%
  }%
\fi\fi

% Set the font macro #1 to the font named \fontprefix#2.
% #3 is the font's design size, #4 is a scale factor, #5 is the CMap
% encoding (only OT1, OT1IT and OT1TT are allowed, or empty to omit).
% Example:
% #1 = \textrm
% #2 = \rmshape
% #3 = 10
% #4 = \mainmagstep
% #5 = OT1
%
\def\setfont#1#2#3#4#5{%
  \font#1=\fontprefix#2#3 scaled #4
  \csname cmap#5\endcsname#1%
}
% This is what gets called when #5 of \setfont is empty.
\let\cmap\gobble
%
% (end of cmaps)

% Use cm as the default font prefix.
% To specify the font prefix, you must define \fontprefix
% before you read in texinfo.tex.
\ifx\fontprefix\thisisundefined
  \def\fontprefix{cm}
\fi
% Support font families that don't use the same naming scheme as CM.
% Font "shape" suffixes appended to \fontprefix by \setfont to build a font
% name (CM naming scheme by default; redefine these for other families).
\def\rmshape{r}
\def\rmbshape{bx}  % where the normal face is bold
\def\bfshape{b}
\def\bxshape{bx}
\def\ttshape{tt}
\def\ttbshape{tt}
\def\ttslshape{sltt}
\def\itshape{ti}
\def\itbshape{bxti}
\def\slshape{sl}
\def\slbshape{bxsl}
\def\sfshape{ss}
\def\sfbshape{ss}
\def\scshape{csc}
\def\scbshape{csc}

% Definitions for a main text size of 11pt.  (The default in Texinfo.)
% Defines one \setfont'd command per style for each context (text, @defun,
% small, smaller, superscript, title, chapter, section, subsection,
% reduced), plus the matching math-italic/symbol fonts and EC sizes.
%
\def\definetextfontsizexi{%
% Text fonts (11.2pt, magstep1).
\def\textnominalsize{11pt}
\edef\mainmagstep{\magstephalf}
\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
\font\texti=cmmi10 scaled \mainmagstep
\font\textsy=cmsy10 scaled \mainmagstep
\def\textecsize{1095}

% A few fonts for @defun names and args.
\setfont\defbf\bfshape{10}{\magstep1}{OT1}
\setfont\deftt\ttshape{10}{\magstep1}{OT1TT}
\setfont\defsl\slshape{10}{\magstep1}{OT1}
\setfont\defttsl\ttslshape{10}{\magstep1}{OT1TT}
\def\df{\let\ttfont=\deftt \let\bffont = \defbf
\let\ttslfont=\defttsl \let\slfont=\defsl \bf}

% Fonts for indices, footnotes, small examples (9pt).
\def\smallnominalsize{9pt}
\setfont\smallrm\rmshape{9}{1000}{OT1}
\setfont\smalltt\ttshape{9}{1000}{OT1TT}
\setfont\smallbf\bfshape{10}{900}{OT1}
\setfont\smallit\itshape{9}{1000}{OT1IT}
\setfont\smallsl\slshape{9}{1000}{OT1}
\setfont\smallsf\sfshape{9}{1000}{OT1}
\setfont\smallsc\scshape{10}{900}{OT1}
\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
\font\smalli=cmmi9
\font\smallsy=cmsy9
\def\smallecsize{0900}

% Fonts for small examples (8pt).
\def\smallernominalsize{8pt}
\setfont\smallerrm\rmshape{8}{1000}{OT1}
\setfont\smallertt\ttshape{8}{1000}{OT1TT}
\setfont\smallerbf\bfshape{10}{800}{OT1}
\setfont\smallerit\itshape{8}{1000}{OT1IT}
\setfont\smallersl\slshape{8}{1000}{OT1}
\setfont\smallersf\sfshape{8}{1000}{OT1}
\setfont\smallersc\scshape{10}{800}{OT1}
\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
\font\smalleri=cmmi8
\font\smallersy=cmsy8
\def\smallerecsize{0800}

% Fonts for math mode superscripts (7pt).
\def\sevennominalsize{7pt}
\setfont\sevenrm\rmshape{7}{1000}{OT1}
\setfont\seventt\ttshape{10}{700}{OT1TT}
\setfont\sevenbf\bfshape{10}{700}{OT1}
\setfont\sevenit\itshape{7}{1000}{OT1IT}
\setfont\sevensl\slshape{10}{700}{OT1}
\setfont\sevensf\sfshape{10}{700}{OT1}
\setfont\sevensc\scshape{10}{700}{OT1}
\setfont\seventtsl\ttslshape{10}{700}{OT1TT}
\font\seveni=cmmi7
\font\sevensy=cmsy7
\def\sevenecsize{0700}

% Fonts for title page (20.4pt):
\def\titlenominalsize{20pt}
\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
\let\titlebf=\titlerm
\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
\font\titlei=cmmi12 scaled \magstep3
\font\titlesy=cmsy10 scaled \magstep4
\def\titleecsize{2074}

% Chapter (and unnumbered) fonts (17.28pt).
\def\chapnominalsize{17pt}
\setfont\chaprm\rmbshape{12}{\magstep2}{OT1}
\setfont\chapit\itbshape{10}{\magstep3}{OT1IT}
\setfont\chapsl\slbshape{10}{\magstep3}{OT1}
\setfont\chaptt\ttbshape{12}{\magstep2}{OT1TT}
\setfont\chapttsl\ttslshape{10}{\magstep3}{OT1TT}
\setfont\chapsf\sfbshape{17}{1000}{OT1}
\let\chapbf=\chaprm
\setfont\chapsc\scbshape{10}{\magstep3}{OT1}
\font\chapi=cmmi12 scaled \magstep2
\font\chapsy=cmsy10 scaled \magstep3
\def\chapecsize{1728}

% Section fonts (14.4pt).
\def\secnominalsize{14pt}
\setfont\secrm\rmbshape{12}{\magstep1}{OT1}
\setfont\secrmnotbold\rmshape{12}{\magstep1}{OT1}
\setfont\secit\itbshape{10}{\magstep2}{OT1IT}
\setfont\secsl\slbshape{10}{\magstep2}{OT1}
\setfont\sectt\ttbshape{12}{\magstep1}{OT1TT}
\setfont\secttsl\ttslshape{10}{\magstep2}{OT1TT}
\setfont\secsf\sfbshape{12}{\magstep1}{OT1}
\let\secbf\secrm
\setfont\secsc\scbshape{10}{\magstep2}{OT1}
\font\seci=cmmi12 scaled \magstep1
\font\secsy=cmsy10 scaled \magstep2
\def\sececsize{1440}

% Subsection fonts (13.15pt).
\def\ssecnominalsize{13pt}
\setfont\ssecrm\rmbshape{12}{\magstephalf}{OT1}
\setfont\ssecit\itbshape{10}{1315}{OT1IT}
\setfont\ssecsl\slbshape{10}{1315}{OT1}
\setfont\ssectt\ttbshape{12}{\magstephalf}{OT1TT}
\setfont\ssecttsl\ttslshape{10}{1315}{OT1TT}
\setfont\ssecsf\sfbshape{12}{\magstephalf}{OT1}
\let\ssecbf\ssecrm
\setfont\ssecsc\scbshape{10}{1315}{OT1}
\font\sseci=cmmi12 scaled \magstephalf
\font\ssecsy=cmsy10 scaled 1315
\def\ssececsize{1200}

% Reduced fonts for @acronym in text (10pt).
\def\reducednominalsize{10pt}
\setfont\reducedrm\rmshape{10}{1000}{OT1}
\setfont\reducedtt\ttshape{10}{1000}{OT1TT}
\setfont\reducedbf\bfshape{10}{1000}{OT1}
\setfont\reducedit\itshape{10}{1000}{OT1IT}
\setfont\reducedsl\slshape{10}{1000}{OT1}
\setfont\reducedsf\sfshape{10}{1000}{OT1}
\setfont\reducedsc\scshape{10}{1000}{OT1}
\setfont\reducedttsl\ttslshape{10}{1000}{OT1TT}
\font\reducedi=cmmi10
\font\reducedsy=cmsy10
\def\reducedecsize{1000}

\textleading = 13.2pt % line spacing for 11pt CM
\textfonts % reset the current fonts
\rm
} % end of 11pt text font size definitions, \definetextfontsizexi

% Definitions to make the main text be 10pt Computer Modern, with
% section, chapter, etc., sizes following suit.  This is for the GNU
% Press printing of the Emacs 22 manual.  Maybe other manuals in the
% future.  Used with @smallbook, which sets the leading to 12pt.
%
\def\definetextfontsizex{%
% Text fonts (10pt).
\def\textnominalsize{10pt}
\edef\mainmagstep{1000}
\setfont\textrm\rmshape{10}{\mainmagstep}{OT1}
\setfont\texttt\ttshape{10}{\mainmagstep}{OT1TT}
\setfont\textbf\bfshape{10}{\mainmagstep}{OT1}
\setfont\textit\itshape{10}{\mainmagstep}{OT1IT}
\setfont\textsl\slshape{10}{\mainmagstep}{OT1}
\setfont\textsf\sfshape{10}{\mainmagstep}{OT1}
\setfont\textsc\scshape{10}{\mainmagstep}{OT1}
\setfont\textttsl\ttslshape{10}{\mainmagstep}{OT1TT}
\font\texti=cmmi10 scaled \mainmagstep
\font\textsy=cmsy10 scaled \mainmagstep
\def\textecsize{1000}

% A few fonts for @defun names and args.
\setfont\defbf\bfshape{10}{\magstephalf}{OT1}
\setfont\deftt\ttshape{10}{\magstephalf}{OT1TT}
\setfont\defsl\slshape{10}{\magstephalf}{OT1}
\setfont\defttsl\ttslshape{10}{\magstephalf}{OT1TT}
\def\df{\let\ttfont=\deftt \let\bffont = \defbf
\let\slfont=\defsl \let\ttslfont=\defttsl \bf}

% Fonts for indices, footnotes, small examples (9pt).
\def\smallnominalsize{9pt}
\setfont\smallrm\rmshape{9}{1000}{OT1}
\setfont\smalltt\ttshape{9}{1000}{OT1TT}
\setfont\smallbf\bfshape{10}{900}{OT1}
\setfont\smallit\itshape{9}{1000}{OT1IT}
\setfont\smallsl\slshape{9}{1000}{OT1}
\setfont\smallsf\sfshape{9}{1000}{OT1}
\setfont\smallsc\scshape{10}{900}{OT1}
\setfont\smallttsl\ttslshape{10}{900}{OT1TT}
\font\smalli=cmmi9
\font\smallsy=cmsy9
\def\smallecsize{0900}

% Fonts for small examples (8pt).
\def\smallernominalsize{8pt}
\setfont\smallerrm\rmshape{8}{1000}{OT1}
\setfont\smallertt\ttshape{8}{1000}{OT1TT}
\setfont\smallerbf\bfshape{10}{800}{OT1}
\setfont\smallerit\itshape{8}{1000}{OT1IT}
\setfont\smallersl\slshape{8}{1000}{OT1}
\setfont\smallersf\sfshape{8}{1000}{OT1}
\setfont\smallersc\scshape{10}{800}{OT1}
\setfont\smallerttsl\ttslshape{10}{800}{OT1TT}
\font\smalleri=cmmi8
\font\smallersy=cmsy8
\def\smallerecsize{0800}

% Fonts for math mode superscripts (7pt).
\def\sevennominalsize{7pt}
\setfont\sevenrm\rmshape{7}{1000}{OT1}
\setfont\seventt\ttshape{10}{700}{OT1TT}
\setfont\sevenbf\bfshape{10}{700}{OT1}
\setfont\sevenit\itshape{7}{1000}{OT1IT}
\setfont\sevensl\slshape{10}{700}{OT1}
\setfont\sevensf\sfshape{10}{700}{OT1}
\setfont\sevensc\scshape{10}{700}{OT1}
\setfont\seventtsl\ttslshape{10}{700}{OT1TT}
\font\seveni=cmmi7
\font\sevensy=cmsy7
\def\sevenecsize{0700}

% Fonts for title page (20.4pt):
\def\titlenominalsize{20pt}
\setfont\titlerm\rmbshape{12}{\magstep3}{OT1}
\setfont\titleit\itbshape{10}{\magstep4}{OT1IT}
\setfont\titlesl\slbshape{10}{\magstep4}{OT1}
\setfont\titlett\ttbshape{12}{\magstep3}{OT1TT}
\setfont\titlettsl\ttslshape{10}{\magstep4}{OT1TT}
\setfont\titlesf\sfbshape{17}{\magstep1}{OT1}
\let\titlebf=\titlerm
\setfont\titlesc\scbshape{10}{\magstep4}{OT1}
\font\titlei=cmmi12 scaled \magstep3
\font\titlesy=cmsy10 scaled \magstep4
\def\titleecsize{2074}

% Chapter fonts (14.4pt).
\def\chapnominalsize{14pt}
\setfont\chaprm\rmbshape{12}{\magstep1}{OT1}
\setfont\chapit\itbshape{10}{\magstep2}{OT1IT}
\setfont\chapsl\slbshape{10}{\magstep2}{OT1}
\setfont\chaptt\ttbshape{12}{\magstep1}{OT1TT}
\setfont\chapttsl\ttslshape{10}{\magstep2}{OT1TT}
\setfont\chapsf\sfbshape{12}{\magstep1}{OT1}
\let\chapbf\chaprm
\setfont\chapsc\scbshape{10}{\magstep2}{OT1}
\font\chapi=cmmi12 scaled \magstep1
\font\chapsy=cmsy10 scaled \magstep2
\def\chapecsize{1440}

% Section fonts (12pt).
\def\secnominalsize{12pt}
\setfont\secrm\rmbshape{12}{1000}{OT1}
\setfont\secit\itbshape{10}{\magstep1}{OT1IT}
\setfont\secsl\slbshape{10}{\magstep1}{OT1}
\setfont\sectt\ttbshape{12}{1000}{OT1TT}
\setfont\secttsl\ttslshape{10}{\magstep1}{OT1TT}
\setfont\secsf\sfbshape{12}{1000}{OT1}
\let\secbf\secrm
\setfont\secsc\scbshape{10}{\magstep1}{OT1}
\font\seci=cmmi12
\font\secsy=cmsy10 scaled \magstep1
\def\sececsize{1200}

% Subsection fonts (10pt).
\def\ssecnominalsize{10pt}
\setfont\ssecrm\rmbshape{10}{1000}{OT1}
\setfont\ssecit\itbshape{10}{1000}{OT1IT}
\setfont\ssecsl\slbshape{10}{1000}{OT1}
\setfont\ssectt\ttbshape{10}{1000}{OT1TT}
\setfont\ssecttsl\ttslshape{10}{1000}{OT1TT}
\setfont\ssecsf\sfbshape{10}{1000}{OT1}
\let\ssecbf\ssecrm
\setfont\ssecsc\scbshape{10}{1000}{OT1}
\font\sseci=cmmi10
\font\ssecsy=cmsy10
\def\ssececsize{1000}

% Reduced fonts for @acronym in text (9pt).
\def\reducednominalsize{9pt}
\setfont\reducedrm\rmshape{9}{1000}{OT1}
\setfont\reducedtt\ttshape{9}{1000}{OT1TT}
\setfont\reducedbf\bfshape{10}{900}{OT1}
\setfont\reducedit\itshape{9}{1000}{OT1IT}
\setfont\reducedsl\slshape{9}{1000}{OT1}
\setfont\reducedsf\sfshape{9}{1000}{OT1}
\setfont\reducedsc\scshape{10}{900}{OT1}
\setfont\reducedttsl\ttslshape{10}{900}{OT1TT}
\font\reducedi=cmmi9
\font\reducedsy=cmsy9
\def\reducedecsize{0900}

\divide\parskip by 2 % reduce space between paragraphs
\textleading = 12pt % line spacing for 10pt CM
\textfonts % reset the current fonts
\rm
} % end of 10pt text font size definitions, \definetextfontsizex

% Fonts for short table of contents.
\setfont\shortcontrm\rmshape{12}{1000}{OT1}
\setfont\shortcontbf\bfshape{10}{\magstep1}{OT1}  % no cmb12
\setfont\shortcontsl\slshape{12}{1000}{OT1}
\setfont\shortconttt\ttshape{12}{1000}{OT1TT}

% We provide the user-level command
%   @fonttextsize 10
% (or 11) to redefine the text font size.  pt is assumed.
%
\def\xiword{11}
\def\xword{10}
\def\xwordpt{10pt}
%
\parseargdef\fonttextsize{%
  \def\textsizearg{#1}%
  %\wlog{doing @fonttextsize \textsizearg}%
  %
  % Set \globaldefs so that documents can use this inside @tex, since
  % makeinfo 4.8 does not support it, but we need it nonetheless.
  %
  \begingroup \globaldefs=1
    \ifx\textsizearg\xword \definetextfontsizex
    \else \ifx\textsizearg\xiword \definetextfontsizexi
    \else
      \errhelp=\EMsimple
      \errmessage{@fonttextsize only supports `10' or `11', not `\textsizearg'}
    \fi\fi
  \endgroup
}

%
% Change the current font style to #1, remembering it in \curfontstyle.
% For now, we do not accumulate font styles: @b{@i{foo}} prints foo in
% italics, not bold italics.
%
\def\setfontstyle#1{%
  \def\curfontstyle{#1}% not as a control sequence, because we are \edef'd.
  \csname #1font\endcsname  % change the current font
}

% The basic style commands: set the math family and switch to the
% corresponding \STYLEfont of the current size set.
\def\rm{\fam=0 \setfontstyle{rm}}
\def\it{\fam=\itfam \setfontstyle{it}}
\def\sl{\fam=\slfam \setfontstyle{sl}}
\def\bf{\fam=\bffam \setfontstyle{bf}}\def\bfstylename{bf}
\def\tt{\fam=\ttfam \setfontstyle{tt}}\def\ttstylename{tt}

% Texinfo sort of supports the sans serif font style, which plain TeX does not.
% So we set up a \sf.
\newfam\sffam
\def\sf{\fam=\sffam \setfontstyle{sf}}

% We don't need math for this font style.
\def\ttsl{\setfontstyle{ttsl}}

% In order for the font changes to affect most math symbols and letters,
% we have to define the \textfont of the standard families.
% We don't bother to reset \scriptscriptfont; awaiting user need.
%
\def\resetmathfonts{%
  \textfont0=\rmfont \textfont1=\ifont \textfont2=\syfont
  \textfont\itfam=\itfont \textfont\slfam=\slfont \textfont\bffam=\bffont
  \textfont\ttfam=\ttfont \textfont\sffam=\sffont
  %
  % Fonts for superscript.  Note that the 7pt fonts are used regardless
  % of the current font size.
  \scriptfont0=\sevenrm \scriptfont1=\seveni \scriptfont2=\sevensy
  \scriptfont\itfam=\sevenit \scriptfont\slfam=\sevensl
  \scriptfont\bffam=\sevenbf \scriptfont\ttfam=\seventt
  \scriptfont\sffam=\sevensf
}

%
% The font-changing commands (all called \...fonts) redefine the meanings
% of \STYLEfont, instead of just \STYLE.  We do this because \STYLE needs
% to also set the current \fam for math mode.  Our \STYLE (e.g., \rm)
% commands hardwire \STYLEfont to set the current font.
%
% The fonts used for \ifont are for "math italics"  (\itfont is for italics
% in regular text).  \syfont is also used in math mode only.
%
% Each font-changing command also sets the names \lsize (one size lower)
% and \lllsize (three sizes lower).  These relative commands are used
% in, e.g., the LaTeX logo and acronyms.
%
% This all needs generalizing, badly.
%
% \assignfonts{PREFIX}: point the generic \rmfont, \itfont, ... names at
% the concrete fonts \PREFIXrm, \PREFIXit, ... defined by \setfont above.
\def\assignfonts#1{%
  \expandafter\let\expandafter\rmfont\csname #1rm\endcsname
  \expandafter\let\expandafter\itfont\csname #1it\endcsname
  \expandafter\let\expandafter\slfont\csname #1sl\endcsname
  \expandafter\let\expandafter\bffont\csname #1bf\endcsname
  \expandafter\let\expandafter\ttfont\csname #1tt\endcsname
  \expandafter\let\expandafter\smallcaps\csname #1sc\endcsname
  \expandafter\let\expandafter\sffont \csname #1sf\endcsname
  \expandafter\let\expandafter\ifont \csname #1i\endcsname
  \expandafter\let\expandafter\syfont \csname #1sy\endcsname
  \expandafter\let\expandafter\ttslfont\csname #1ttsl\endcsname
}

% True when the current size set uses a bold face for normal roman text
% (titles, chapters, sections); set via \definefontsetatsize below.
\newif\ifrmisbold

% Select smaller font size with the current style. Used to change font size
% in, e.g., the LaTeX logo and acronyms.  If we are using bold fonts for
% normal roman text, also use bold fonts for roman text in the smaller size.
\def\switchtolllsize{%
   \expandafter\assignfonts\expandafter{\lllsize}%
   \ifrmisbold
     \let\rmfont\bffont
   \fi
   \csname\curfontstyle\endcsname
}%

\def\switchtolsize{%
   \expandafter\assignfonts\expandafter{\lsize}%
   \ifrmisbold
     \let\rmfont\bffont
   \fi
   \csname\curfontstyle\endcsname
}%

% \definefontsetatsize{SIZE}{LSIZE}{LLLSIZE}{LEADING}{RMISBOLD}: define the
% command \SIZEfonts, which records the size names, sets \ifrmisbold,
% assigns the generic font names, resets the math fonts, and sets leading.
\def\definefontsetatsize#1#2#3#4#5{%
\expandafter\def\csname #1fonts\endcsname{%
  \def\curfontsize{#1}%
  \def\lsize{#2}\def\lllsize{#3}%
  \csname rmisbold#5\endcsname
  \assignfonts{#1}%
  \resetmathfonts
  \setleading{#4}%
}}

\definefontsetatsize{text}   {reduced}{smaller}{\textleading}{false}
\definefontsetatsize{title}  {chap}   {subsec} {27pt}  {true}
\definefontsetatsize{chap}   {sec}    {text}   {19pt}  {true}
\definefontsetatsize{sec}    {subsec} {reduced}{17pt}  {true}
\definefontsetatsize{ssec}   {text}   {small}  {15pt}  {true}
\definefontsetatsize{reduced}{small}  {smaller}{10.5pt}{false}
\definefontsetatsize{small}  {smaller}{smaller}{10.5pt}{false}
\definefontsetatsize{smaller}{smaller}{smaller}{9.5pt} {false}

\def\titlefont#1{{\titlefonts\rm #1}}

\let\subsecfonts = \ssecfonts
\let\subsubsecfonts = \ssecfonts

% Define these just so they can be easily changed for other fonts.
\def\angleleft{$\langle$}
\def\angleright{$\rangle$}

% Set the fonts to use with the @small... environments.
\let\smallexamplefonts = \smallfonts

% About \smallexamplefonts.  If we use \smallfonts (9pt), @smallexample
% can fit this many characters:
%   8.5x11=86   smallbook=72  a4=90  a5=69
% If we use \scriptfonts (8pt), then we can fit this many characters:
%   8.5x11=90+  smallbook=80  a4=90+  a5=77
% For me, subjectively, the few extra characters that fit aren't worth
% the additional smallness of 8pt.  So I'm making the default 9pt.
%
% By the way, for comparison, here's what fits with @example (10pt):
%   8.5x11=71  smallbook=60  a4=75  a5=58
% --karl, 24jan03.

% Set up the default fonts, so we can use them for creating boxes.
%
\definetextfontsizexi

% Check if we are currently using a typewriter font.  Since all the
% Computer Modern typewriter fonts have zero interword stretch (and
% shrink), and it is reasonable to expect all typewriter fonts to have
% this property, we can check that font parameter.
%
\def\ifmonospace{\ifdim\fontdimen3\font=0pt }

% With ` and ' temporarily active, define the two quote-behavior setters:
% \setcodequotes makes the quotes expand to the configurable code-quote
% glyphs; \setregularquotes restores the plain \lq/\rq.
{
\catcode`\'=\active \catcode`\`=\active
\gdef\setcodequotes{\let`\codequoteleft \let'\codequoteright}
\gdef\setregularquotes{\let`\lq \let'\rq}
}

% Allow an option to not use regular directed right quote/apostrophe
% (char 0x27), but instead the undirected quote from cmtt (char 0x0d).
% The undirected quote is ugly, so don't make it the default, but it
% works for pasting with more pdf viewers (at least evince), the
% lilypond developers report.  xpdf does work with the regular 0x27.
%
\def\codequoteright{%
  \ifmonospace
    % Honor either the txi-internal or the user-level flag.
    \expandafter\ifx\csname SETtxicodequoteundirected\endcsname\relax
      \expandafter\ifx\csname SETcodequoteundirected\endcsname\relax
        '%
      \else \char'15 \fi
    \else \char'15 \fi
  \else
    '%
  \fi
}
%
% and a similar option for the left quote char vs. a grave accent.
% Modern fonts display ASCII 0x60 as a grave accent, so some people like
% the code environments to do likewise.
%
\def\codequoteleft{%
  \ifmonospace
    \expandafter\ifx\csname SETtxicodequotebacktick\endcsname\relax
      \expandafter\ifx\csname SETcodequotebacktick\endcsname\relax
        % [Knuth] pp. 380,381,391
        % \relax disables Spanish ligatures ?` and !` of \tt font.
        \relax`%
      \else \char'22 \fi
    \else \char'22 \fi
  \else
    \relax`%
  \fi
}

% Commands to set the quote options.
%
% @codequoteundirected on|off: toggle the undirected right quote in code
% by defining/undefining the SETtxicodequoteundirected flag read above.
\parseargdef\codequoteundirected{%
  \def\temp{#1}%
  \ifx\temp\onword
    \expandafter\let\csname SETtxicodequoteundirected\endcsname = t%
  \else\ifx\temp\offword
    \expandafter\let\csname SETtxicodequoteundirected\endcsname = \relax
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @codequoteundirected value `\temp', must be on|off}%
  \fi\fi
}
%
% @codequotebacktick on|off: likewise for the left quote / backtick glyph.
\parseargdef\codequotebacktick{%
  \def\temp{#1}%
  \ifx\temp\onword
    \expandafter\let\csname SETtxicodequotebacktick\endcsname = t%
  \else\ifx\temp\offword
    \expandafter\let\csname SETtxicodequotebacktick\endcsname = \relax
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @codequotebacktick value `\temp', must be on|off}%
  \fi\fi
}

% [Knuth] pp. 380,381,391, disable Spanish ligatures ?` and !` of \tt font.
\def\noligaturesquoteleft{\relax\lq}

% Count depth in font-changes, for error checks
\newcount\fontdepth \fontdepth=0

% Font commands.

% #1 is the font command (\sl or \it), #2 is the text to slant.
% If we are in a monospaced environment, however, 1) always use \ttsl,
% and 2) do not add an italic correction.
\def\dosmartslant#1#2{%
  \ifusingtt
    {{\ttsl #2}\let\next=\relax}%
    {\def\next{{#1#2}\futurelet\next\smartitaliccorrection}}%
  \next
}
\def\smartslanted{\dosmartslant\sl}
\def\smartitalic{\dosmartslant\it}

% Output an italic correction unless \next (presumed to be the following
% character) is such as not to need one.
\def\smartitaliccorrection{%
  \ifx\next,%
  \else\ifx\next-%
  \else\ifx\next.%
  \else\ifx\next\.%
  \else\ifx\next\comma%
  \else\ptexslash
  \fi\fi\fi\fi\fi
  \aftersmartic
}

% Unconditional use \ttsl, and no ic.  @var is set to this for defuns.
\def\ttslanted#1{{\ttsl #1}}

% @cite is like \smartslanted except unconditionally use \sl.  We never want
% ttsl for book titles, do we?
% @cite: always slanted, with smart italic correction afterwards.
\def\cite#1{{\sl #1}\futurelet\next\smartitaliccorrection}

% \aftersmartic is a hook run after the italic correction; @var uses it
% to append \null once, then restores the previous (empty) meaning.
\def\aftersmartic{}
\def\var#1{%
  \let\saveaftersmartic = \aftersmartic
  \def\aftersmartic{\null\let\aftersmartic=\saveaftersmartic}%
  \smartslanted{#1}%
}

\let\i=\smartitalic
\let\slanted=\smartslanted
\let\dfn=\smartslanted
\let\emph=\smartitalic

% Explicit font changes: @r, @sc, undocumented @ii.
\def\r#1{{\rm #1}}              % roman font
\def\sc#1{{\smallcaps#1}}       % smallcaps font
\def\ii#1{{\it #1}}             % italic font

% @b, explicit bold.  Also @strong.
\def\b#1{{\bf #1}}
\let\strong=\b

% @sansserif, explicit sans.
\def\sansserif#1{{\sf #1}}

% We can't just use \exhyphenpenalty, because that only has effect at
% the end of a paragraph.  Restore normal hyphenation at the end of the
% group within which \nohyphenation is presumably called.
%
\def\nohyphenation{\hyphenchar\font = -1  \aftergroup\restorehyphenation}
\def\restorehyphenation{\hyphenchar\font = `- }

% Set sfcode to normal for the chars that usually have another value.
% Can't use plain's \frenchspacing because it uses the `\x notation, and
% sometimes \x has an active definition that messes things up.
%
\catcode`@=11
  \def\plainfrenchspacing{%
    \sfcode`\.=\@m \sfcode`\?=\@m \sfcode`\!=\@m
    \sfcode`\:=\@m \sfcode`\;=\@m \sfcode`\,=\@m
    \def\endofsentencespacefactor{1000}% for @. and friends
  }
  \def\plainnonfrenchspacing{%
    \sfcode`\.3000\sfcode`\?3000\sfcode`\!3000
    \sfcode`\:2000\sfcode`\;1500\sfcode`\,1250
    \def\endofsentencespacefactor{3000}% for @. and friends
  }
\catcode`@=\other
\def\endofsentencespacefactor{3000}% default

% @t, explicit typewriter.
\def\t#1{%
  {\tt \plainfrenchspacing #1}%
  \null
}

% @samp.
\def\samp#1{{\setcodequotes\lq\tclose{#1}\rq\null}}

% @indicateurl is \samp, that is, with quotes.
\let\indicateurl=\samp

% @code (and similar) prints in typewriter, but with spaces the same
% size as normal in the surrounding text, without hyphenation, etc.
% This is a subroutine for that.
% \tclose{TEXT}: typeset TEXT in typewriter with normal-width interword
% spaces, hyphenation disabled, and french spacing.
\def\tclose#1{%
  {%
    % Change normal interword space to be same as for the current font.
    \spaceskip = \fontdimen2\font
    %
    % Switch to typewriter.
    \tt
    %
    % But `\ ' produces the large typewriter interword space.
    \def\ {{\spaceskip = 0pt{} }}%
    %
    % Turn off hyphenation.
    \nohyphenation
    %
    \plainfrenchspacing
    #1%
  }%
  \null % reset spacefactor to 1000
}

% We *must* turn on hyphenation at `-' and `_' in @code.
% (But see \codedashfinish below.)
% Otherwise, it is too hard to avoid overfull hboxes
% in the Emacs manual, the Library manual, etc.
%
% Unfortunately, TeX uses one parameter (\hyphenchar) to control
% both hyphenation at - and hyphenation within words.
% We must therefore turn them both off (\tclose does that)
% and arrange explicitly to hyphenate at a dash. -- rms.
{
  \catcode`\-=\active \catcode`\_=\active
  \catcode`\'=\active \catcode`\`=\active
  \global\let'=\rq \global\let`=\lq  % default definitions
  %
  % @code body: make - and _ active so they can emit break points,
  % honoring the @allowcodebreaks setting.
  \global\def\code{\begingroup
    \setcodequotes
    \catcode\dashChar=\active  \catcode\underChar=\active
    \ifallowcodebreaks
      \let-\codedash
      \let_\codeunder
    \else
      \let-\normaldash
      \let_\realunder
    \fi
    % Given -foo (with a single dash), we do not want to allow a break
    % after the hyphen.
    \global\let\codedashprev=\codedash
    %
    \codex
  }
  %
  \gdef\codedash{\futurelet\next\codedashfinish}
  \gdef\codedashfinish{%
    \normaldash % always output the dash character itself.
    %
    % Now, output a discretionary to allow a line break, unless
    % (a) the next character is a -, or
    % (b) the preceding character is a -.
    % E.g., given --posix, we do not want to allow a break after either -.
    % Given --foo-bar, we do want to allow a break between the - and the b.
    \ifx\next\codedash \else
      \ifx\codedashprev\codedash
      \else \discretionary{}{}{}\fi
    \fi
    % we need the space after the = for the case when \next itself is a
    % space token; it would get swallowed otherwise.  As in @code{- a}.
    \global\let\codedashprev= \next
  }
}
\def\normaldash{-}
%
% \codex: typeset the @code argument and close the group opened by \code.
\def\codex #1{\tclose{#1}\endgroup}

\def\codeunder{%
  % this is all so @math{@code{var_name}+1} can work.  In math mode, _
  % is "active" (mathcode"8000) and \normalunderscore (or \char95, etc.)
  % will therefore expand the active definition of _, which is us
  % (inside @code that is), therefore an endless loop.
  \ifusingtt{\ifmmode
               \mathchar"075F % class 0=ordinary, family 7=ttfam, pos 0x5F=_.
             \else\normalunderscore \fi
             \discretionary{}{}{}}%
            {\_}%
}

% An additional complication: the above will allow breaks after, e.g.,
% each of the four underscores in __typeof__.  This is bad.
% @allowcodebreaks provides a document-level way to turn breaking at -
% and _ on and off.
%
\newif\ifallowcodebreaks  \allowcodebreakstrue

\def\keywordtrue{true}
\def\keywordfalse{false}

\parseargdef\allowcodebreaks{%
  \def\txiarg{#1}%
  \ifx\txiarg\keywordtrue
    \allowcodebreakstrue
  \else\ifx\txiarg\keywordfalse
    \allowcodebreaksfalse
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @allowcodebreaks option `\txiarg', must be true|false}%
  \fi\fi
}

% For @command, @env, @file, @option quotes seem unnecessary,
% so use \code rather than \samp.
\let\command=\code
\let\env=\code
\let\file=\code
\let\option=\code

% @uref (abbreviation for `urlref') aka @url takes an optional
% (comma-separated) second argument specifying the text to display and
% an optional third arg as text to display instead of (rather than in
% addition to) the url itself.  First (mandatory) arg is the url.

% TeX-only option to allow changing PDF output to show only the second
% arg (if given), and not the url (which is then just the link target).
\newif\ifurefurlonlylink

% The default \pretolerance setting stops the penalty inserted in
% \urefallowbreak being a discouragement to line breaking.  Set it to
% a negative value for this paragraph only.  Hopefully this does not
% conflict with redefinitions of \par done elsewhere.
% \nopretolerance: force a first-pass line-breaking attempt for this
% paragraph only; \par restores \pretolerance and its own meaning.
\def\nopretolerance{%
\pretolerance=-1
\def\par{\endgraf\pretolerance=100 \let\par\endgraf}%
}

% The main macro is \urefbreak, which allows breaking at expected
% places within the url.
\def\urefbreak{\nopretolerance \begingroup \urefcatcodes \dourefbreak}
\let\uref=\urefbreak
%
% Supply empty default values for args 2 and 3, then dispatch.
\def\dourefbreak#1{\urefbreakfinish #1,,,\finish}
% #1 = url, #2 = optional display text, #3 = optional replacement text.
\def\urefbreakfinish#1,#2,#3,#4\finish{% doesn't work in @example
  \unsepspaces
  \pdfurl{#1}%
  \setbox0 = \hbox{\ignorespaces #3}%
  \ifdim\wd0 > 0pt
    \unhbox0 % third arg given, show only that
  \else
    \setbox0 = \hbox{\ignorespaces #2}% look for second arg
    \ifdim\wd0 > 0pt
      \ifpdf
        % For pdfTeX and LuaTeX
        \ifurefurlonlylink
          % PDF plus option to not display url, show just arg
          \unhbox0
        \else
          % PDF, normally display both arg and url for consistency,
          % visibility, if the pdf is eventually used to print, etc.
          \unhbox0\ (\urefcode{#1})%
        \fi
      \else
        \ifx\XeTeXrevision\thisisundefined
          \unhbox0\ (\urefcode{#1})% DVI, always show arg and url
        \else
          % For XeTeX
          \ifurefurlonlylink
            % PDF plus option to not display url, show just arg
            \unhbox0
          \else
            % PDF, normally display both arg and url for consistency,
            % visibility, if the pdf is eventually used to print, etc.
            \unhbox0\ (\urefcode{#1})%
          \fi
        \fi
      \fi
    \else
      \urefcode{#1}% only url given, so show it
    \fi
  \fi
  \endlink
\endgroup}

% Allow line breaks around only a few characters (only).
\def\urefcatcodes{%
  \catcode`\&=\active \catcode`\.=\active
  \catcode`\#=\active \catcode`\?=\active
  \catcode`\/=\active
}
{
  \urefcatcodes
  %
  % \urefcode: like \code, but with the url break characters active so
  % each can insert its break points.
  \global\def\urefcode{\begingroup
    \setcodequotes
    \urefcatcodes
    \let&\urefcodeamp
    \let.\urefcodedot
    \let#\urefcodehash
    \let?\urefcodequest
    \let/\urefcodeslash
    \codex
  }
  %
  % By default, they are just regular characters.
  \global\def&{\normalamp}
  \global\def.{\normaldot}
  \global\def#{\normalhash}
  \global\def?{\normalquest}
  \global\def/{\normalslash}
}
% Each break character emits pre-break penalty/glue, the character itself,
% then post-break penalty/glue (per @urefbreakstyle).
\def\urefcodeamp{\urefprebreak \&\urefpostbreak}
\def\urefcodedot{\urefprebreak .\urefpostbreak}
\def\urefcodehash{\urefprebreak \#\urefpostbreak}
\def\urefcodequest{\urefprebreak ?\urefpostbreak}
\def\urefcodeslash{\futurelet\next\urefcodeslashfinish}
{
  \catcode`\/=\active
  \global\def\urefcodeslashfinish{%
    \urefprebreak \slashChar
    % Allow line break only after the final / in a sequence of
    % slashes, to avoid line break between the slashes in http://.
    \ifx\next/\else \urefpostbreak \fi
  }
}

% By default we'll break after the special characters, but some people like to
% break before the special chars, so allow that.  Also allow no breaking at
% all, for manual control.
%
\parseargdef\urefbreakstyle{%
  \def\txiarg{#1}%
  \ifx\txiarg\wordnone
    \def\urefprebreak{\nobreak}\def\urefpostbreak{\nobreak}
  \else\ifx\txiarg\wordbefore
    \def\urefprebreak{\urefallowbreak}\def\urefpostbreak{\nobreak}
  \else\ifx\txiarg\wordafter
    \def\urefprebreak{\nobreak}\def\urefpostbreak{\urefallowbreak}
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @urefbreakstyle setting `\txiarg'}%
  \fi\fi\fi
}
\def\wordafter{after}
\def\wordbefore{before}
\def\wordnone{none}

% Allow a ragged right output to aid breaking long URL's.  There can
% be a break at the \allowbreak with no extra glue (if the existing stretch in
% the line is sufficient), a break at the \penalty with extra glue added
% at the end of the line, or no break at all here.
% Changing the value of the penalty and/or the amount of stretch affects how
% preferable one choice is over the other.
\def\urefallowbreak{%
  \penalty0\relax
  \hskip 0pt plus 2 em\relax
  \penalty1000\relax
  \hskip 0pt plus -2 em\relax
}

\urefbreakstyle after

% @url synonym for @uref, since that's how everyone uses it.
%
\let\url=\uref

% rms does not like angle brackets --karl, 17may97.
% So now @email is just like @uref, unless we are pdf.
% %\def\email#1{\angleleft{\tt #1}\angleright} \ifpdforxetex \def\email#1{\doemail#1,,\finish} \def\doemail#1,#2,#3\finish{\begingroup \unsepspaces \pdfurl{mailto:#1}% \setbox0 = \hbox{\ignorespaces #2}% \ifdim\wd0>0pt\unhbox0\else\code{#1}\fi \endlink \endgroup} \else \let\email=\uref \fi % @kbdinputstyle -- arg is `distinct' (@kbd uses slanted tty font always), % `example' (@kbd uses ttsl only inside of @example and friends), % or `code' (@kbd uses normal tty font always). \parseargdef\kbdinputstyle{% \def\txiarg{#1}% \ifx\txiarg\worddistinct \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\ttsl}% \else\ifx\txiarg\wordexample \gdef\kbdexamplefont{\ttsl}\gdef\kbdfont{\tt}% \else\ifx\txiarg\wordcode \gdef\kbdexamplefont{\tt}\gdef\kbdfont{\tt}% \else \errhelp = \EMsimple \errmessage{Unknown @kbdinputstyle setting `\txiarg'}% \fi\fi\fi } \def\worddistinct{distinct} \def\wordexample{example} \def\wordcode{code} % Default is `distinct'. \kbdinputstyle distinct % @kbd is like @code, except that if the argument is just one @key command, % then @kbd has no effect. \def\kbd#1{{\def\look{#1}\expandafter\kbdsub\look??\par}} \def\xkey{\key} \def\kbdsub#1#2#3\par{% \def\one{#1}\def\three{#3}\def\threex{??}% \ifx\one\xkey\ifx\threex\three \key{#2}% \else{\tclose{\kbdfont\setcodequotes\look}}\fi \else{\tclose{\kbdfont\setcodequotes\look}}\fi } % definition of @key that produces a lozenge. Doesn't adjust to text size. %\setfont\keyrm\rmshape{8}{1000}{OT1} %\font\keysy=cmsy9 %\def\key#1{{\keyrm\textfont2=\keysy \leavevmode\hbox{% % \raise0.4pt\hbox{\angleleft}\kern-.08em\vtop{% % \vbox{\hrule\kern-0.4pt % \hbox{\raise0.4pt\hbox{\vphantom{\angleleft}}#1}}% % \kern-0.4pt\hrule}% % \kern-.06em\raise0.4pt\hbox{\angleright}}}} % definition of @key with no lozenge. If the current font is already % monospace, don't change it; that way, we respect @kbdinputstyle. But % if it isn't monospace, then use \tt. 
% \def\key#1{{\setregularquotes \nohyphenation \ifmonospace\else\tt\fi #1}\null} % @clicksequence{File @click{} Open ...} \def\clicksequence#1{\begingroup #1\endgroup} % @clickstyle @arrow (by default) \parseargdef\clickstyle{\def\click{#1}} \def\click{\arrow} % Typeset a dimension, e.g., `in' or `pt'. The only reason for the % argument is to make the input look right: @dmn{pt} instead of @dmn{}pt. % \def\dmn#1{\thinspace #1} % @acronym for "FBI", "NATO", and the like. % We print this one point size smaller, since it's intended for % all-uppercase. % \def\acronym#1{\doacronym #1,,\finish} \def\doacronym#1,#2,#3\finish{% {\switchtolsize #1}% \def\temp{#2}% \ifx\temp\empty \else \space ({\unsepspaces \ignorespaces \temp \unskip})% \fi \null % reset \spacefactor=1000 } % @abbr for "Comput. J." and the like. % No font change, but don't do end-of-sentence spacing. % \def\abbr#1{\doabbr #1,,\finish} \def\doabbr#1,#2,#3\finish{% {\plainfrenchspacing #1}% \def\temp{#2}% \ifx\temp\empty \else \space ({\unsepspaces \ignorespaces \temp \unskip})% \fi \null % reset \spacefactor=1000 } % @asis just yields its argument. Used with @table, for example. % \def\asis#1{#1} % @math outputs its argument in math mode. % % One complication: _ usually means subscripts, but it could also mean % an actual _ character, as in @math{@var{some_variable} + 1}. So make % _ active, and distinguish by seeing if the current family is \slfam, % which is what @var uses. { \catcode`\_ = \active \gdef\mathunderscore{% \catcode`\_=\active \def_{\ifnum\fam=\slfam \_\else\sb\fi}% } } % Another complication: we want \\ (and @\) to output a math (or tt) \. % FYI, plain.tex uses \\ as a temporary control sequence (for no % particular reason), but this is not advertised and we don't care. % % The \mathchar is class=0=ordinary, family=7=ttfam, position=5C=\. 
\def\mathbackslash{\ifnum\fam=\ttfam \mathchar"075C \else\backslash \fi} % \def\math{% \ifmmode\else % only go into math if not in math mode already \tex \mathunderscore \let\\ = \mathbackslash \mathactive % make the texinfo accent commands work in math mode \let\"=\ddot \let\'=\acute \let\==\bar \let\^=\hat \let\`=\grave \let\u=\breve \let\v=\check \let\~=\tilde \let\dotaccent=\dot % have to provide another name for sup operator \let\mathopsup=\sup $\expandafter\finishmath\fi } \def\finishmath#1{#1$\endgroup} % Close the group opened by \tex. % Some active characters (such as <) are spaced differently in math. % We have to reset their definitions in case the @math was an argument % to a command which sets the catcodes (such as @item or @section). % { \catcode`^ = \active \catcode`< = \active \catcode`> = \active \catcode`+ = \active \catcode`' = \active \gdef\mathactive{% \let^ = \ptexhat \let< = \ptexless \let> = \ptexgtr \let+ = \ptexplus \let' = \ptexquoteright } } % for @sub and @sup, if in math mode, just do a normal sub/superscript. % If in text, use math to place as sub/superscript, but switch % into text mode, with smaller fonts. This is a different font than the % one used for real math sub/superscripts (8pt vs. 7pt), but let's not % fix it (significant additions to font machinery) until someone notices. % \def\sub{\ifmmode \expandafter\sb \else \expandafter\finishsub\fi} \def\finishsub#1{$\sb{\hbox{\switchtolllsize #1}}$}% % \def\sup{\ifmmode \expandafter\ptexsp \else \expandafter\finishsup\fi} \def\finishsup#1{$\ptexsp{\hbox{\switchtolllsize #1}}$}% % provide this command from LaTeX as it is very common \def\frac#1#2{{{#1}\over{#2}}} % @displaymath. % \globaldefs is needed to recognize the end lines in \tex and % \end tex. Set \thisenv as @end displaymath is seen before @end tex. 
{\obeylines \globaldefs=1 \envdef\displaymath{% \tex% \def\thisenv{\displaymath}% \begingroup\let\end\displaymathend% $$% } \def\displaymathend{$$\endgroup\end}% \def\Edisplaymath{% \def\thisenv{\tex}% \end tex }} % @inlinefmt{FMTNAME,PROCESSED-TEXT} and @inlineraw{FMTNAME,RAW-TEXT}. % Ignore unless FMTNAME == tex; then it is like @iftex and @tex, % except specified as a normal braced arg, so no newlines to worry about. % \def\outfmtnametex{tex} % \long\def\inlinefmt#1{\doinlinefmt #1,\finish} \long\def\doinlinefmt#1,#2,\finish{% \def\inlinefmtname{#1}% \ifx\inlinefmtname\outfmtnametex \ignorespaces #2\fi } % % @inlinefmtifelse{FMTNAME,THEN-TEXT,ELSE-TEXT} expands THEN-TEXT if % FMTNAME is tex, else ELSE-TEXT. \long\def\inlinefmtifelse#1{\doinlinefmtifelse #1,,,\finish} \long\def\doinlinefmtifelse#1,#2,#3,#4,\finish{% \def\inlinefmtname{#1}% \ifx\inlinefmtname\outfmtnametex \ignorespaces #2\else \ignorespaces #3\fi } % % For raw, must switch into @tex before parsing the argument, to avoid % setting catcodes prematurely. Doing it this way means that, for % example, @inlineraw{html, foo{bar} gets a parse error instead of being % ignored. But this isn't important because if people want a literal % *right* brace they would have to use a command anyway, so they may as % well use a command to get a left brace too. We could re-use the % delimiter character idea from \verb, but it seems like overkill. % \long\def\inlineraw{\tex \doinlineraw} \long\def\doinlineraw#1{\doinlinerawtwo #1,\finish} \def\doinlinerawtwo#1,#2,\finish{% \def\inlinerawname{#1}% \ifx\inlinerawname\outfmtnametex \ignorespaces #2\fi \endgroup % close group opened by \tex. } % @inlineifset{VAR, TEXT} expands TEXT if VAR is @set. % \long\def\inlineifset#1{\doinlineifset #1,\finish} \long\def\doinlineifset#1,#2,\finish{% \def\inlinevarname{#1}% \expandafter\ifx\csname SET\inlinevarname\endcsname\relax \else\ignorespaces#2\fi } % @inlineifclear{VAR, TEXT} expands TEXT if VAR is not @set. 
% \long\def\inlineifclear#1{\doinlineifclear #1,\finish} \long\def\doinlineifclear#1,#2,\finish{% \def\inlinevarname{#1}% \expandafter\ifx\csname SET\inlinevarname\endcsname\relax \ignorespaces#2\fi } \message{glyphs,} % and logos. % @@ prints an @, as does @atchar{}. \def\@{\char64 } \let\atchar=\@ % @{ @} @lbracechar{} @rbracechar{} all generate brace characters. \def\lbracechar{{\ifmonospace\char123\else\ensuremath\lbrace\fi}} \def\rbracechar{{\ifmonospace\char125\else\ensuremath\rbrace\fi}} \let\{=\lbracechar \let\}=\rbracechar % @comma{} to avoid , parsing problems. \let\comma = , % Accents: @, @dotaccent @ringaccent @ubaraccent @udotaccent % Others are defined by plain TeX: @` @' @" @^ @~ @= @u @v @H. \let\, = \ptexc \let\dotaccent = \ptexdot \def\ringaccent#1{{\accent23 #1}} \let\tieaccent = \ptext \let\ubaraccent = \ptexb \let\udotaccent = \d % Other special characters: @questiondown @exclamdown @ordf @ordm % Plain TeX defines: @AA @AE @O @OE @L (plus lowercase versions) @ss. \def\questiondown{?`} \def\exclamdown{!`} \def\ordf{\leavevmode\raise1ex\hbox{\switchtolllsize \underbar{a}}} \def\ordm{\leavevmode\raise1ex\hbox{\switchtolllsize \underbar{o}}} % Dotless i and dotless j, used for accents. \def\imacro{i} \def\jmacro{j} \def\dotless#1{% \def\temp{#1}% \ifx\temp\imacro \ifmmode\imath \else\ptexi \fi \else\ifx\temp\jmacro \ifmmode\jmath \else\j \fi \else \errmessage{@dotless can be used only with i or j}% \fi\fi } % The \TeX{} logo, as in plain, but resetting the spacing so that a % period following counts as ending a sentence. (Idea found in latex.) % \edef\TeX{\TeX \spacefactor=1000 } % @LaTeX{} logo. Not quite the same results as the definition in % latex.ltx, since we use a different font for the raised A; it's most % convenient for us to use an explicitly smaller font, rather than using % the \scriptstyle font (since we don't reset \scriptstyle and % \scriptscriptstyle). 
% \def\LaTeX{% L\kern-.36em {\setbox0=\hbox{T}% \vbox to \ht0{\hbox{% \ifx\textnominalsize\xwordpt % for 10pt running text, lllsize (8pt) is too small for the A in LaTeX. % Revert to plain's \scriptsize, which is 7pt. \count255=\the\fam $\fam\count255 \scriptstyle A$% \else % For 11pt, we can use our lllsize. \switchtolllsize A% \fi }% \vss }}% \kern-.15em \TeX } % Some math mode symbols. Define \ensuremath to switch into math mode % unless we are already there. Expansion tricks may not be needed here, % but safer, and can't hurt. \def\ensuremath{\ifmmode \expandafter\asis \else\expandafter\ensuredmath \fi} \def\ensuredmath#1{$\relax#1$} % \def\bullet{\ensuremath\ptexbullet} \def\geq{\ensuremath\ge} \def\leq{\ensuremath\le} \def\minus{\ensuremath-} % @dots{} outputs an ellipsis using the current font. % We do .5em per period so that it has the same spacing in the cm % typewriter fonts as three actual period characters; on the other hand, % in other typewriter fonts three periods are wider than 1.5em. So do % whichever is larger. % \def\dots{% \leavevmode \setbox0=\hbox{...}% get width of three periods \ifdim\wd0 > 1.5em \dimen0 = \wd0 \else \dimen0 = 1.5em \fi \hbox to \dimen0{% \hskip 0pt plus.25fil .\hskip 0pt plus1fil .\hskip 0pt plus1fil .\hskip 0pt plus.5fil }% } % @enddots{} is an end-of-sentence ellipsis. % \def\enddots{% \dots \spacefactor=\endofsentencespacefactor } % @point{}, @result{}, @expansion{}, @print{}, @equiv{}. % % Since these characters are used in examples, they should be an even number of % \tt widths. Each \tt character is 1en, so two makes it 1em. % \def\point{$\star$} \def\arrow{\leavevmode\raise.05ex\hbox to 1em{\hfil$\rightarrow$\hfil}} \def\result{\leavevmode\raise.05ex\hbox to 1em{\hfil$\Rightarrow$\hfil}} \def\expansion{\leavevmode\hbox to 1em{\hfil$\mapsto$\hfil}} \def\print{\leavevmode\lower.1ex\hbox to 1em{\hfil$\dashv$\hfil}} \def\equiv{\leavevmode\hbox to 1em{\hfil$\ptexequiv$\hfil}} % The @error{} command. 
% Adapted from the TeXbook's \boxit. % \newbox\errorbox % {\ttfont \global\dimen0 = 3em}% Width of the box. \dimen2 = .55pt % Thickness of rules % The text. (`r' is open on the right, `e' somewhat less so on the left.) \setbox0 = \hbox{\kern-.75pt \reducedsf \putworderror\kern-1.5pt} % \setbox\errorbox=\hbox to \dimen0{\hfil \hsize = \dimen0 \advance\hsize by -5.8pt % Space to left+right. \advance\hsize by -2\dimen2 % Rules. \vbox{% \hrule height\dimen2 \hbox{\vrule width\dimen2 \kern3pt % Space to left of text. \vtop{\kern2.4pt \box0 \kern2.4pt}% Space above/below. \kern3pt\vrule width\dimen2}% Space to right. \hrule height\dimen2} \hfil} % \def\error{\leavevmode\lower.7ex\copy\errorbox} % @pounds{} is a sterling sign, which Knuth put in the CM italic font. % \def\pounds{\ifmonospace{\ecfont\char"BF}\else{\it\$}\fi} % @euro{} comes from a separate font, depending on the current style. % We use the free feym* fonts from the eurosym package by Henrik % Theiling, which support regular, slanted, bold and bold slanted (and % "outlined" (blackboard board, sort of) versions, which we don't need). % It is available from http://www.ctan.org/tex-archive/fonts/eurosym. % % Although only regular is the truly official Euro symbol, we ignore % that. The Euro is designed to be slightly taller than the regular % font height. % % feymr - regular % feymo - slanted % feybr - bold % feybo - bold slanted % % There is no good (free) typewriter version, to my knowledge. % A feymr10 euro is ~7.3pt wide, while a normal cmtt10 char is ~5.25pt wide. % Hmm. % % Also doesn't work in math. Do we need to do math with euro symbols? % Hope not. % % \def\euro{{\eurofont e}} \def\eurofont{% % We set the font at each command, rather than predefining it in % \textfonts and the other font-switching commands, so that % installations which never need the symbol don't have to have the % font installed. 
% % There is only one designed size (nominal 10pt), so we always scale % that to the current nominal size. % % By the way, simply using "at 1em" works for cmr10 and the like, but % does not work for cmbx10 and other extended/shrunken fonts. % \def\eurosize{\csname\curfontsize nominalsize\endcsname}% % \ifx\curfontstyle\bfstylename % bold: \font\thiseurofont = \ifusingit{feybo10}{feybr10} at \eurosize \else % regular: \font\thiseurofont = \ifusingit{feymo10}{feymr10} at \eurosize \fi \thiseurofont } % Glyphs from the EC fonts. We don't use \let for the aliases, because % sometimes we redefine the original macro, and the alias should reflect % the redefinition. % % Use LaTeX names for the Icelandic letters. \def\DH{{\ecfont \char"D0}} % Eth \def\dh{{\ecfont \char"F0}} % eth \def\TH{{\ecfont \char"DE}} % Thorn \def\th{{\ecfont \char"FE}} % thorn % \def\guillemetleft{{\ecfont \char"13}} \def\guillemotleft{\guillemetleft} \def\guillemetright{{\ecfont \char"14}} \def\guillemotright{\guillemetright} \def\guilsinglleft{{\ecfont \char"0E}} \def\guilsinglright{{\ecfont \char"0F}} \def\quotedblbase{{\ecfont \char"12}} \def\quotesinglbase{{\ecfont \char"0D}} % % This positioning is not perfect (see the ogonek LaTeX package), but % we have the precomposed glyphs for the most common cases. We put the % tests to use those glyphs in the single \ogonek macro so we have fewer % dummy definitions to worry about for index entries, etc. % % ogonek is also used with other letters in Lithuanian (IOU), but using % the precomposed glyphs for those is not so easy since they aren't in % the same EC font. 
\def\ogonek#1{{% \def\temp{#1}% \ifx\temp\macrocharA\Aogonek \else\ifx\temp\macrochara\aogonek \else\ifx\temp\macrocharE\Eogonek \else\ifx\temp\macrochare\eogonek \else \ecfont \setbox0=\hbox{#1}% \ifdim\ht0=1ex\accent"0C #1% \else\ooalign{\unhbox0\crcr\hidewidth\char"0C \hidewidth}% \fi \fi\fi\fi\fi }% } \def\Aogonek{{\ecfont \char"81}}\def\macrocharA{A} \def\aogonek{{\ecfont \char"A1}}\def\macrochara{a} \def\Eogonek{{\ecfont \char"86}}\def\macrocharE{E} \def\eogonek{{\ecfont \char"A6}}\def\macrochare{e} % % Use the European Computer Modern fonts (cm-super in outline format) % for non-CM glyphs. That is ec* for regular text and tc* for the text % companion symbols (LaTeX TS1 encoding). Both are part of the ec % package and follow the same conventions. % \def\ecfont{\etcfont{e}} \def\tcfont{\etcfont{t}} % \def\etcfont#1{% % We can't distinguish serif/sans and italic/slanted, but this % is used for crude hacks anyway (like adding French and German % quotes to documents typeset with CM, where we lose kerning), so % hopefully nobody will notice/care. \edef\ecsize{\csname\curfontsize ecsize\endcsname}% \edef\nominalsize{\csname\curfontsize nominalsize\endcsname}% \ifmonospace % typewriter: \font\thisecfont = #1ctt\ecsize \space at \nominalsize \else \ifx\curfontstyle\bfstylename % bold: \font\thisecfont = #1cb\ifusingit{i}{x}\ecsize \space at \nominalsize \else % regular: \font\thisecfont = #1c\ifusingit{ti}{rm}\ecsize \space at \nominalsize \fi \fi \thisecfont } % @registeredsymbol - R in a circle. The font for the R should really % be smaller yet, but lllsize is the best we can do for now. % Adapted from the plain.tex definition of \copyright. % \def\registeredsymbol{% $^{{\ooalign{\hfil\raise.07ex\hbox{\switchtolllsize R}% \hfil\crcr\Orb}}% }$% } % @textdegree - the normal degrees sign. 
% \def\textdegree{$^\circ$} % Laurent Siebenmann reports \Orb undefined with: % Textures 1.7.7 (preloaded format=plain 93.10.14) (68K) 16 APR 2004 02:38 % so we'll define it if necessary. % \ifx\Orb\thisisundefined \def\Orb{\mathhexbox20D} \fi % Quotes. \chardef\quoteleft=`\` \chardef\quoteright=`\' % only change font for tt for correct kerning and to avoid using % \ecfont unless necessary. \def\quotedblleft{% \ifmonospace{\ecfont\char"10}\else{\char"5C}\fi } \def\quotedblright{% \ifmonospace{\ecfont\char"11}\else{\char`\"}\fi } \message{page headings,} \newskip\titlepagetopglue \titlepagetopglue = 1.5in \newskip\titlepagebottomglue \titlepagebottomglue = 2pc % First the title page. Must do @settitle before @titlepage. \newif\ifseenauthor \newif\iffinishedtitlepage % @setcontentsaftertitlepage used to do an implicit @contents or % @shortcontents after @end titlepage, but it is now obsolete. \def\setcontentsaftertitlepage{% \errmessage{@setcontentsaftertitlepage has been removed as a Texinfo command; move your @contents command if you want the contents after the title page.}}% \def\setshortcontentsaftertitlepage{% \errmessage{@setshortcontentsaftertitlepage has been removed as a Texinfo command; move your @shortcontents and @contents commands if you want the contents after the title page.}}% \parseargdef\shorttitlepage{% \begingroup \hbox{}\vskip 1.5in \chaprm \centerline{#1}% \endgroup\page\hbox{}\page} \envdef\titlepage{% % Open one extra group, as we want to close it in the middle of \Etitlepage. \begingroup \parindent=0pt \textfonts % Leave some space at the very top of the page. \vglue\titlepagetopglue % No rule at page bottom unless we print one at the top with @title. \finishedtitlepagetrue % % Most title ``pages'' are actually two pages long, with space % at the top of the second. We don't want the ragged left on the second. 
\let\oldpage = \page \def\page{% \iffinishedtitlepage\else \finishtitlepage \fi \let\page = \oldpage \page \null }% } \def\Etitlepage{% \iffinishedtitlepage\else \finishtitlepage \fi % It is important to do the page break before ending the group, % because the headline and footline are only empty inside the group. % If we use the new definition of \page, we always get a blank page % after the title page, which we certainly don't want. \oldpage \endgroup % % Need this before the \...aftertitlepage checks so that if they are % in effect the toc pages will come out with page numbers. \HEADINGSon } \def\finishtitlepage{% \vskip4pt \hrule height 2pt width \hsize \vskip\titlepagebottomglue \finishedtitlepagetrue } % Settings used for typesetting titles: no hyphenation, no indentation, % don't worry much about spacing, ragged right. This should be used % inside a \vbox, and fonts need to be set appropriately first. \par should % be specified before the end of the \vbox, since a vbox is a group. % \def\raggedtitlesettings{% \rm \hyphenpenalty=10000 \parindent=0pt \tolerance=5000 \ptexraggedright } % Macros to be used within @titlepage: \let\subtitlerm=\rmfont \def\subtitlefont{\subtitlerm \normalbaselineskip = 13pt \normalbaselines} \parseargdef\title{% \checkenv\titlepage \vbox{\titlefonts \raggedtitlesettings #1\par}% % print a rule at the page bottom also. \finishedtitlepagefalse \vskip4pt \hrule height 4pt width \hsize \vskip4pt } \parseargdef\subtitle{% \checkenv\titlepage {\subtitlefont \rightline{#1}}% } % @author should come last, but may come many times. % It can also be used inside @quotation. % \parseargdef\author{% \def\temp{\quotation}% \ifx\thisenv\temp \def\quotationauthor{#1}% printed in \Equotation. \else \checkenv\titlepage \ifseenauthor\else \vskip 0pt plus 1filll \seenauthortrue \fi {\secfonts\rm \leftline{#1}}% \fi } % Set up page headings and footings. 
\let\thispage=\folio \newtoks\evenheadline % headline on even pages \newtoks\oddheadline % headline on odd pages \newtoks\evenchapheadline% headline on even pages with a new chapter \newtoks\oddchapheadline % headline on odd pages with a new chapter \newtoks\evenfootline % footline on even pages \newtoks\oddfootline % footline on odd pages % Now make \makeheadline and \makefootline in Plain TeX use those variables \headline={{\textfonts\rm \ifchapterpage \ifodd\pageno\the\oddchapheadline\else\the\evenchapheadline\fi \else \ifodd\pageno\the\oddheadline\else\the\evenheadline\fi \fi}} \footline={{\textfonts\rm \ifodd\pageno \the\oddfootline \else \the\evenfootline \fi}\HEADINGShook} \let\HEADINGShook=\relax % Commands to set those variables. % For example, this is what @headings on does % @evenheading @thistitle|@thispage|@thischapter % @oddheading @thischapter|@thispage|@thistitle % @evenfooting @thisfile|| % @oddfooting ||@thisfile \def\evenheading{\parsearg\evenheadingxxx} \def\evenheadingxxx #1{\evenheadingyyy #1\|\|\|\|\finish} \def\evenheadingyyy #1\|#2\|#3\|#4\finish{% \global\evenheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}} \global\evenchapheadline=\evenheadline} \def\oddheading{\parsearg\oddheadingxxx} \def\oddheadingxxx #1{\oddheadingyyy #1\|\|\|\|\finish} \def\oddheadingyyy #1\|#2\|#3\|#4\finish{% \global\oddheadline={\rlap{\centerline{#2}}\line{#1\hfil#3}}% \global\oddchapheadline=\oddheadline} \parseargdef\everyheading{\oddheadingxxx{#1}\evenheadingxxx{#1}}% \def\evenfooting{\parsearg\evenfootingxxx} \def\evenfootingxxx #1{\evenfootingyyy #1\|\|\|\|\finish} \def\evenfootingyyy #1\|#2\|#3\|#4\finish{% \global\evenfootline={\rlap{\centerline{#2}}\line{#1\hfil#3}}} \def\oddfooting{\parsearg\oddfootingxxx} \def\oddfootingxxx #1{\oddfootingyyy #1\|\|\|\|\finish} \def\oddfootingyyy #1\|#2\|#3\|#4\finish{% \global\oddfootline = {\rlap{\centerline{#2}}\line{#1\hfil#3}}% % % Leave some space for the footline. 
Hopefully ok to assume % @evenfooting will not be used by itself. \global\advance\txipageheight by -12pt \global\advance\vsize by -12pt } \parseargdef\everyfooting{\oddfootingxxx{#1}\evenfootingxxx{#1}} % @evenheadingmarks top \thischapter <- chapter at the top of a page % @evenheadingmarks bottom \thischapter <- chapter at the bottom of a page % % The same set of arguments for: % % @oddheadingmarks % @evenfootingmarks % @oddfootingmarks % @everyheadingmarks % @everyfootingmarks % These define \getoddheadingmarks, \getevenheadingmarks, % \getoddfootingmarks, and \getevenfootingmarks, each to one of % \gettopheadingmarks, \getbottomheadingmarks. % \def\evenheadingmarks{\headingmarks{even}{heading}} \def\oddheadingmarks{\headingmarks{odd}{heading}} \def\evenfootingmarks{\headingmarks{even}{footing}} \def\oddfootingmarks{\headingmarks{odd}{footing}} \parseargdef\everyheadingmarks{\headingmarks{even}{heading}{#1} \headingmarks{odd}{heading}{#1} } \parseargdef\everyfootingmarks{\headingmarks{even}{footing}{#1} \headingmarks{odd}{footing}{#1} } % #1 = even/odd, #2 = heading/footing, #3 = top/bottom. \def\headingmarks#1#2#3 {% \expandafter\let\expandafter\temp \csname get#3headingmarks\endcsname \global\expandafter\let\csname get#1#2marks\endcsname \temp } \everyheadingmarks bottom \everyfootingmarks bottom % @headings double turns headings on for double-sided printing. % @headings single turns headings on for single-sided printing. % @headings off turns them off. % @headings on same as @headings double, retained for compatibility. % @headings after turns on double-sided headings after this page. % @headings doubleafter turns on double-sided headings after this page. % @headings singleafter turns on single-sided headings after this page. % By default, they are off at the start of a document, % and turned `on' after @end titlepage. 
\parseargdef\headings{\csname HEADINGS#1\endcsname} \def\headingsoff{% non-global headings elimination \evenheadline={\hfil}\evenfootline={\hfil}\evenchapheadline={\hfil}% \oddheadline={\hfil}\oddfootline={\hfil}\oddchapheadline={\hfil}% } \def\HEADINGSoff{{\globaldefs=1 \headingsoff}} % global setting \HEADINGSoff % it's the default % When we turn headings on, set the page number to 1. \def\pageone{ \global\pageno=1 \global\arabiccount = \pagecount } % For double-sided printing, put current file name in lower left corner, % chapter name on inside top of right hand pages, document % title on inside top of left hand pages, and page numbers on outside top % edge of all pages. \def\HEADINGSdouble{% \pageone \HEADINGSdoublex } \let\contentsalignmacro = \chappager % For single-sided printing, chapter title goes across top left of page, % page number on top right. \def\HEADINGSsingle{% \pageone \HEADINGSsinglex } \def\HEADINGSon{\HEADINGSdouble} \def\HEADINGSafter{\let\HEADINGShook=\HEADINGSdoublex} \let\HEADINGSdoubleafter=\HEADINGSafter \def\HEADINGSdoublex{% \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\folio\hfil\thistitle}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\evenchapheadline={\line{\folio\hfil}} \global\oddchapheadline={\line{\hfil\folio}} \global\let\contentsalignmacro = \chapoddpage } \def\HEADINGSsingleafter{\let\HEADINGShook=\HEADINGSsinglex} \def\HEADINGSsinglex{% \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\thischapter\hfil\folio}} \global\oddheadline={\line{\thischapter\hfil\folio}} \global\evenchapheadline={\line{\hfil\folio}} \global\oddchapheadline={\line{\hfil\folio}} \global\let\contentsalignmacro = \chappager } % for @setchapternewpage off \def\HEADINGSsinglechapoff{% \pageone \global\evenfootline={\hfil} \global\oddfootline={\hfil} \global\evenheadline={\line{\thischapter\hfil\folio}} \global\oddheadline={\line{\thischapter\hfil\folio}} 
\global\evenchapheadline=\evenheadline \global\oddchapheadline=\oddheadline \global\let\contentsalignmacro = \chappager } % Subroutines used in generating headings % This produces Day Month Year style of output. % Only define if not already defined, in case a txi-??.tex file has set % up a different format (e.g., txi-cs.tex does this). \ifx\today\thisisundefined \def\today{% \number\day\space \ifcase\month \or\putwordMJan\or\putwordMFeb\or\putwordMMar\or\putwordMApr \or\putwordMMay\or\putwordMJun\or\putwordMJul\or\putwordMAug \or\putwordMSep\or\putwordMOct\or\putwordMNov\or\putwordMDec \fi \space\number\year} \fi % @settitle line... specifies the title of the document, for headings. % It generates no output of its own. \def\thistitle{\putwordNoTitle} \def\settitle{\parsearg{\gdef\thistitle}} \message{tables,} % Tables -- @table, @ftable, @vtable, @item(x). % default indentation of table text \newdimen\tableindent \tableindent=.8in % default indentation of @itemize and @enumerate text \newdimen\itemindent \itemindent=.3in % margin between end of table item and start of table text. \newdimen\itemmargin \itemmargin=.1in % used internally for \itemindent minus \itemmargin \newdimen\itemmax % Note @table, @ftable, and @vtable define @item, @itemx, etc., with % these defs. % They also define \itemindex % to index the item name in whatever manner is desired (perhaps none). \newif\ifitemxneedsnegativevskip \def\itemxpar{\par\ifitemxneedsnegativevskip\nobreak\vskip-\parskip\nobreak\fi} \def\internalBitem{\smallbreak \parsearg\itemzzz} \def\internalBitemx{\itemxpar \parsearg\itemzzz} \def\itemzzz #1{\begingroup % \advance\hsize by -\rightskip \advance\hsize by -\tableindent \setbox0=\hbox{\itemindicate{#1}}% \itemindex{#1}% \nobreak % This prevents a break before @itemx. % % If the item text does not fit in the space we have, put it on a line % by itself, and do not allow a page break either before or after that % line. 
We do not start a paragraph here because then if the next % command is, e.g., @kindex, the whatsit would get put into the % horizontal list on a line by itself, resulting in extra blank space. \ifdim \wd0>\itemmax % % Make this a paragraph so we get the \parskip glue and wrapping, % but leave it ragged-right. \begingroup \advance\leftskip by-\tableindent \advance\hsize by\tableindent \advance\rightskip by0pt plus1fil\relax \leavevmode\unhbox0\par \endgroup % % We're going to be starting a paragraph, but we don't want the % \parskip glue -- logically it's part of the @item we just started. \nobreak \vskip-\parskip % % Stop a page break at the \parskip glue coming up. However, if % what follows is an environment such as @example, there will be no % \parskip glue; then the negative vskip we just inserted would % cause the example and the item to crash together. So we use this % bizarre value of 10001 as a signal to \aboveenvbreak to insert % \parskip glue after all. Section titles are handled this way also. % \penalty 10001 \endgroup \itemxneedsnegativevskipfalse \else % The item text fits into the space. Start a paragraph, so that the % following text (if any) will end up on the same line. \noindent % Do this with kerns and \unhbox so that if there is a footnote in % the item text, it can migrate to the main vertical list and % eventually be printed. \nobreak\kern-\tableindent \dimen0 = \itemmax \advance\dimen0 by \itemmargin \advance\dimen0 by -\wd0 \unhbox0 \nobreak\kern\dimen0 \endgroup \itemxneedsnegativevskiptrue \fi } \def\item{\errmessage{@item while not in a list environment}} \def\itemx{\errmessage{@itemx while not in a list environment}} % @table, @ftable, @vtable. 
\envdef\table{% \let\itemindex\gobble \tablecheck{table}% } \envdef\ftable{% \def\itemindex ##1{\doind {fn}{\code{##1}}}% \tablecheck{ftable}% } \envdef\vtable{% \def\itemindex ##1{\doind {vr}{\code{##1}}}% \tablecheck{vtable}% } \def\tablecheck#1{% \ifnum \the\catcode`\^^M=\active \endgroup \errmessage{This command won't work in this context; perhaps the problem is that we are \inenvironment\thisenv}% \def\next{\doignore{#1}}% \else \let\next\tablex \fi \next } \def\tablex#1{% \def\itemindicate{#1}% \parsearg\tabley } \def\tabley#1{% {% \makevalueexpandable \edef\temp{\noexpand\tablez #1\space\space\space}% \expandafter }\temp \endtablez } \def\tablez #1 #2 #3 #4\endtablez{% \aboveenvbreak \ifnum 0#1>0 \advance \leftskip by #1\mil \fi \ifnum 0#2>0 \tableindent=#2\mil \fi \ifnum 0#3>0 \advance \rightskip by #3\mil \fi \itemmax=\tableindent \advance \itemmax by -\itemmargin \advance \leftskip by \tableindent \exdentamount=\tableindent \parindent = 0pt \parskip = \smallskipamount \ifdim \parskip=0pt \parskip=2pt \fi \let\item = \internalBitem \let\itemx = \internalBitemx } \def\Etable{\endgraf\afterenvbreak} \let\Eftable\Etable \let\Evtable\Etable \let\Eitemize\Etable \let\Eenumerate\Etable % This is the counter used by @enumerate, which is really @itemize \newcount \itemno \envdef\itemize{\parsearg\doitemize} \def\doitemize#1{% \aboveenvbreak \itemmax=\itemindent \advance\itemmax by -\itemmargin \advance\leftskip by \itemindent \exdentamount=\itemindent \parindent=0pt \parskip=\smallskipamount \ifdim\parskip=0pt \parskip=2pt \fi % % Try typesetting the item mark so that if the document erroneously says % something like @itemize @samp (intending @table), there's an error % right away at the @itemize. It's not the best error message in the % world, but it's better than leaving it to the @item. This means if % the user wants an empty mark, they have to say @w{} not just @w. 
\def\itemcontents{#1}% \setbox0 = \hbox{\itemcontents}% % % @itemize with no arg is equivalent to @itemize @bullet. \ifx\itemcontents\empty\def\itemcontents{\bullet}\fi % \let\item=\itemizeitem } % Definition of @item while inside @itemize and @enumerate. % \def\itemizeitem{% \advance\itemno by 1 % for enumerations {\let\par=\endgraf \smallbreak}% reasonable place to break {% % If the document has an @itemize directly after a section title, a % \nobreak will be last on the list, and \sectionheading will have % done a \vskip-\parskip. In that case, we don't want to zero % parskip, or the item text will crash with the heading. On the % other hand, when there is normal text preceding the item (as there % usually is), we do want to zero parskip, or there would be too much % space. In that case, we won't have a \nobreak before. At least % that's the theory. \ifnum\lastpenalty<10000 \parskip=0in \fi \noindent \hbox to 0pt{\hss \itemcontents \kern\itemmargin}% % \ifinner\else \vadjust{\penalty 1200}% not good to break after first line of item. \fi % We can be in inner vertical mode in a footnote, although an % @itemize looks awful there. }% \flushcr } % \splitoff TOKENS\endmark defines \first to be the first token in % TOKENS, and \rest to be the remainder. % \def\splitoff#1#2\endmark{\def\first{#1}\def\rest{#2}}% % Allow an optional argument of an uppercase letter, lowercase letter, % or number, to specify the first label in the enumerated list. No % argument is the same as `1'. % \envparseargdef\enumerate{\enumeratey #1 \endenumeratey} \def\enumeratey #1 #2\endenumeratey{% % If we were given no argument, pretend we were given `1'. \def\thearg{#1}% \ifx\thearg\empty \def\thearg{1}\fi % % Detect if the argument is a single token. If so, it might be a % letter. Otherwise, the only valid thing it can be is a number. % (We will always have one token, because of the test we just made. 
% This is a good thing, since \splitoff doesn't work given nothing at % all -- the first parameter is undelimited.) \expandafter\splitoff\thearg\endmark \ifx\rest\empty % Only one token in the argument. It could still be anything. % A ``lowercase letter'' is one whose \lccode is nonzero. % An ``uppercase letter'' is one whose \lccode is both nonzero, and % not equal to itself. % Otherwise, we assume it's a number. % % We need the \relax at the end of the \ifnum lines to stop TeX from % continuing to look for a . % \ifnum\lccode\expandafter`\thearg=0\relax \numericenumerate % a number (we hope) \else % It's a letter. \ifnum\lccode\expandafter`\thearg=\expandafter`\thearg\relax \lowercaseenumerate % lowercase letter \else \uppercaseenumerate % uppercase letter \fi \fi \else % Multiple tokens in the argument. We hope it's a number. \numericenumerate \fi } % An @enumerate whose labels are integers. The starting integer is % given in \thearg. % \def\numericenumerate{% \itemno = \thearg \startenumeration{\the\itemno}% } % The starting (lowercase) letter is in \thearg. \def\lowercaseenumerate{% \itemno = \expandafter`\thearg \startenumeration{% % Be sure we're not beyond the end of the alphabet. \ifnum\itemno=0 \errmessage{No more lowercase letters in @enumerate; get a bigger alphabet}% \fi \char\lccode\itemno }% } % The starting (uppercase) letter is in \thearg. \def\uppercaseenumerate{% \itemno = \expandafter`\thearg \startenumeration{% % Be sure we're not beyond the end of the alphabet. \ifnum\itemno=0 \errmessage{No more uppercase letters in @enumerate; get a bigger alphabet} \fi \char\uccode\itemno }% } % Call \doitemize, adding a period to the first argument and supplying the % common last two arguments. Also subtract one from the initial value in % \itemno, since @item increments \itemno. % \def\startenumeration#1{% \advance\itemno by -1 \doitemize{#1.}\flushcr } % @alphaenumerate and @capsenumerate are abbreviations for giving an arg % to @enumerate. 
% \def\alphaenumerate{\enumerate{a}} \def\capsenumerate{\enumerate{A}} \def\Ealphaenumerate{\Eenumerate} \def\Ecapsenumerate{\Eenumerate} % @multitable macros % Amy Hendrickson, 8/18/94, 3/6/96 % % @multitable ... @end multitable will make as many columns as desired. % Contents of each column will wrap at width given in preamble. Width % can be specified either with sample text given in a template line, % or in percent of \hsize, the current width of text on page. % Table can continue over pages but will only break between lines. % To make preamble: % % Either define widths of columns in terms of percent of \hsize: % @multitable @columnfractions .25 .3 .45 % @item ... % % Numbers following @columnfractions are the percent of the total % current hsize to be used for each column. You may use as many % columns as desired. % Or use a template: % @multitable {Column 1 template} {Column 2 template} {Column 3 template} % @item ... % using the widest term desired in each column. % Each new table line starts with @item, each subsequent new column % starts with @tab. Empty columns may be produced by supplying @tab's % with nothing between them for as many times as empty columns are needed, % ie, @tab@tab@tab will produce two empty columns. % @item, @tab do not need to be on their own lines, but it will not hurt % if they are. % Sample multitable: % @multitable {Column 1 template} {Column 2 template} {Column 3 template} % @item first col stuff @tab second col stuff @tab third col % @item % first col stuff % @tab % second col stuff % @tab % third col % @item first col stuff @tab second col stuff % @tab Many paragraphs of text may be used in any column. % % They will wrap at the width determined by the template. % @item@tab@tab This will be in third column. % @end multitable % Default dimensions may be reset by user. % @multitableparskip is vertical space between paragraphs in table. % @multitableparindent is paragraph indent in table. 
% @multitablecolmargin is horizontal space to be left between columns. % @multitablelinespace is space to leave between table items, baseline % to baseline. % 0pt means it depends on current normal line spacing. % \newskip\multitableparskip \newskip\multitableparindent \newdimen\multitablecolspace \newskip\multitablelinespace \multitableparskip=0pt \multitableparindent=6pt \multitablecolspace=12pt \multitablelinespace=0pt % Macros used to set up halign preamble: % \let\endsetuptable\relax \def\xendsetuptable{\endsetuptable} \let\columnfractions\relax \def\xcolumnfractions{\columnfractions} \newif\ifsetpercent % #1 is the @columnfraction, usually a decimal number like .5, but might % be just 1. We just use it, whatever it is. % \def\pickupwholefraction#1 {% \global\advance\colcount by 1 \expandafter\xdef\csname col\the\colcount\endcsname{#1\hsize}% \setuptable } \newcount\colcount \def\setuptable#1{% \def\firstarg{#1}% \ifx\firstarg\xendsetuptable \let\go = \relax \else \ifx\firstarg\xcolumnfractions \global\setpercenttrue \else \ifsetpercent \let\go\pickupwholefraction \else \global\advance\colcount by 1 \setbox0=\hbox{#1\unskip\space}% Add a normal word space as a % separator; typically that is always in the input, anyway. \expandafter\xdef\csname col\the\colcount\endcsname{\the\wd0}% \fi \fi \ifx\go\pickupwholefraction % Put the argument back for the \pickupwholefraction call, so % we'll always have a period there to be parsed. \def\go{\pickupwholefraction#1}% \else \let\go = \setuptable \fi% \fi \go } % multitable-only commands. % % @headitem starts a heading row, which we typeset in bold. Assignments % have to be global since we are inside the implicit group of an % alignment entry. \everycr below resets \everytab so we don't have to % undo it ourselves. 
\def\headitemfont{\b}% for people to use in the template row; not changeable \def\headitem{% \checkenv\multitable \crcr \gdef\headitemcrhook{\nobreak}% attempt to avoid page break after headings \global\everytab={\bf}% can't use \headitemfont since the parsing differs \the\everytab % for the first item }% % % default for tables with no headings. \let\headitemcrhook=\relax % % A \tab used to include \hskip1sp. But then the space in a template % line is not enough. That is bad. So let's go back to just `&' until % we again encounter the problem the 1sp was intended to solve. % --karl, nathan@acm.org, 20apr99. \def\tab{\checkenv\multitable &\the\everytab}% % @multitable ... @end multitable definitions: % \newtoks\everytab % insert after every tab. % \envdef\multitable{% \vskip\parskip \startsavinginserts % % @item within a multitable starts a normal row. % We use \def instead of \let so that if one of the multitable entries % contains an @itemize, we don't choke on the \item (seen as \crcr aka % \endtemplate) expanding \doitemize. \def\item{\crcr}% % \tolerance=9500 \hbadness=9500 \setmultitablespacing \parskip=\multitableparskip \parindent=\multitableparindent \overfullrule=0pt \global\colcount=0 % \everycr = {% \noalign{% \global\everytab={}% Reset from possible headitem. \global\colcount=0 % Reset the column counter. % % Check for saved footnotes, etc.: \checkinserts % % Perhaps a \nobreak, then reset: \headitemcrhook \global\let\headitemcrhook=\relax }% }% % \parsearg\domultitable } \def\domultitable#1{% % To parse everything between @multitable and @item: \setuptable#1 \endsetuptable % % This preamble sets up a generic column definition, which will % be used as many times as user calls for columns. % \vtop will set a single line and will also let text wrap and % continue for many paragraphs if desired. 
\halign\bgroup &% \global\advance\colcount by 1 \multistrut \vtop{% % Use the current \colcount to find the correct column width: \hsize=\expandafter\csname col\the\colcount\endcsname % % In order to keep entries from bumping into each other % we will add a \leftskip of \multitablecolspace to all columns after % the first one. % % If a template has been used, we will add \multitablecolspace % to the width of each template entry. % % If the user has set preamble in terms of percent of \hsize we will % use that dimension as the width of the column, and the \leftskip % will keep entries from bumping into each other. Table will start at % left margin and final column will justify at right margin. % % Make sure we don't inherit \rightskip from the outer environment. \rightskip=0pt \ifnum\colcount=1 % The first column will be indented with the surrounding text. \advance\hsize by\leftskip \else \ifsetpercent \else % If user has not set preamble in terms of percent of \hsize % we will advance \hsize by \multitablecolspace. \advance\hsize by \multitablecolspace \fi % In either case we will make \leftskip=\multitablecolspace: \leftskip=\multitablecolspace \fi % Ignoring space at the beginning and end avoids an occasional spurious % blank line, when TeX decides to break the line at the space before the % box from the multistrut, so the strut ends up on a line by itself. % For example: % @multitable @columnfractions .11 .89 % @item @code{#} % @tab Legal holiday which is valid in major parts of the whole country. % Is automatically provided with highlighting sequences respectively % marking characters. \noindent\ignorespaces##\unskip\multistrut }\cr } \def\Emultitable{% \crcr \egroup % end the \halign \global\setpercentfalse } \def\setmultitablespacing{% \def\multistrut{\strut}% just use the standard line spacing % % Compute \multitablelinespace (if not defined by user) for use in % \multitableparskip calculation. 
We used define \multistrut based on % this, but (ironically) that caused the spacing to be off. % See bug-texinfo report from Werner Lemberg, 31 Oct 2004 12:52:20 +0100. \ifdim\multitablelinespace=0pt \setbox0=\vbox{X}\global\multitablelinespace=\the\baselineskip \global\advance\multitablelinespace by-\ht0 \fi % Test to see if parskip is larger than space between lines of % table. If not, do nothing. % If so, set to same dimension as multitablelinespace. \ifdim\multitableparskip>\multitablelinespace \global\multitableparskip=\multitablelinespace \global\advance\multitableparskip-7pt % to keep parskip somewhat smaller % than skip between lines in the table. \fi% \ifdim\multitableparskip=0pt \global\multitableparskip=\multitablelinespace \global\advance\multitableparskip-7pt % to keep parskip somewhat smaller % than skip between lines in the table. \fi} \message{conditionals,} % @iftex, @ifnotdocbook, @ifnothtml, @ifnotinfo, @ifnotplaintext, % @ifnotxml always succeed. They currently do nothing; we don't % attempt to check whether the conditionals are properly nested. But we % have to remember that they are conditionals, so that @end doesn't % attempt to close an environment group. % \def\makecond#1{% \expandafter\let\csname #1\endcsname = \relax \expandafter\let\csname iscond.#1\endcsname = 1 } \makecond{iftex} \makecond{ifnotdocbook} \makecond{ifnothtml} \makecond{ifnotinfo} \makecond{ifnotplaintext} \makecond{ifnotxml} % Ignore @ignore, @ifhtml, @ifinfo, and the like. 
% \def\direntry{\doignore{direntry}} \def\documentdescription{\doignore{documentdescription}} \def\docbook{\doignore{docbook}} \def\html{\doignore{html}} \def\ifdocbook{\doignore{ifdocbook}} \def\ifhtml{\doignore{ifhtml}} \def\ifinfo{\doignore{ifinfo}} \def\ifnottex{\doignore{ifnottex}} \def\ifplaintext{\doignore{ifplaintext}} \def\ifxml{\doignore{ifxml}} \def\ignore{\doignore{ignore}} \def\menu{\doignore{menu}} \def\xml{\doignore{xml}} % Ignore text until a line `@end #1', keeping track of nested conditionals. % % A count to remember the depth of nesting. \newcount\doignorecount \def\doignore#1{\begingroup % Scan in ``verbatim'' mode: \obeylines \catcode`\@ = \other \catcode`\{ = \other \catcode`\} = \other % % Make sure that spaces turn into tokens that match what \doignoretext wants. \spaceisspace % % Count number of #1's that we've seen. \doignorecount = 0 % % Swallow text until we reach the matching `@end #1'. \dodoignore{#1}% } { \catcode`_=11 % We want to use \_STOP_ which cannot appear in texinfo source. \obeylines % % \gdef\dodoignore#1{% % #1 contains the command name as a string, e.g., `ifinfo'. % % Define a command to find the next `@end #1'. \long\def\doignoretext##1^^M@end #1{% \doignoretextyyy##1^^M@#1\_STOP_}% % % And this command to find another #1 command, at the beginning of a % line. (Otherwise, we would consider a line `@c @ifset', for % example, to count as an @ifset for nesting.) \long\def\doignoretextyyy##1^^M@#1##2\_STOP_{\doignoreyyy{##2}\_STOP_}% % % And now expand that command. \doignoretext ^^M% }% } \def\doignoreyyy#1{% \def\temp{#1}% \ifx\temp\empty % Nothing found. \let\next\doignoretextzzz \else % Found a nested condition, ... \advance\doignorecount by 1 \let\next\doignoretextyyy % ..., look for another. % If we're here, #1 ends with ^^M\ifinfo (for example). \fi \next #1% the token \_STOP_ is present just after this macro. } % We have to swallow the remaining "\_STOP_". 
% \def\doignoretextzzz#1{% \ifnum\doignorecount = 0 % We have just found the outermost @end. \let\next\enddoignore \else % Still inside a nested condition. \advance\doignorecount by -1 \let\next\doignoretext % Look for the next @end. \fi \next } % Finish off ignored text. { \obeylines% % Ignore anything after the last `@end #1'; this matters in verbatim % environments, where otherwise the newline after an ignored conditional % would result in a blank line in the output. \gdef\enddoignore#1^^M{\endgroup\ignorespaces}% } % @set VAR sets the variable VAR to an empty value. % @set VAR REST-OF-LINE sets VAR to the value REST-OF-LINE. % % Since we want to separate VAR from REST-OF-LINE (which might be % empty), we can't just use \parsearg; we have to insert a space of our % own to delimit the rest of the line, and then take it out again if we % didn't need it. % We rely on the fact that \parsearg sets \catcode`\ =10. % \parseargdef\set{\setyyy#1 \endsetyyy} \def\setyyy#1 #2\endsetyyy{% {% \makevalueexpandable \def\temp{#2}% \edef\next{\gdef\makecsname{SET#1}}% \ifx\temp\empty \next{}% \else \setzzz#2\endsetzzz \fi }% } % Remove the trailing space \setxxx inserted. \def\setzzz#1 \endsetzzz{\next{#1}} % @clear VAR clears (i.e., unsets) the variable VAR. % \parseargdef\clear{% {% \makevalueexpandable \global\expandafter\let\csname SET#1\endcsname=\relax }% } % @value{foo} gets the text saved in variable foo. \def\value{\begingroup\makevalueexpandable\valuexxx} \def\valuexxx#1{\expandablevalue{#1}\endgroup} { \catcode`\-=\active \catcode`\_=\active % \gdef\makevalueexpandable{% \let\value = \expandablevalue % We don't want these characters active, ... \catcode`\-=\other \catcode`\_=\other % ..., but we might end up with active ones in the argument if % we're called from @code, as @code{@value{foo-bar_}}, though. % So \let them to their normal equivalents. 
\let-\normaldash \let_\normalunderscore } } \def\expandablevalue#1{% \expandafter\ifx\csname SET#1\endcsname\relax {[No value for ``#1'']}% \message{Variable `#1', used in @value, is not set.}% \else \csname SET#1\endcsname \fi } % Like \expandablevalue, but completely expandable (the \message in the % definition above operates at the execution level of TeX). Used when % writing to auxiliary files, due to the expansion that \write does. % If flag is undefined, pass through an unexpanded @value command: maybe it % will be set by the time it is read back in. % % NB flag names containing - or _ may not work here. \def\dummyvalue#1{% \expandafter\ifx\csname SET#1\endcsname\relax \string\value{#1}% \else \csname SET#1\endcsname \fi } % Used for @value's in index entries to form the sort key: expand the @value % if possible, otherwise sort late. \def\indexnofontsvalue#1{% \expandafter\ifx\csname SET#1\endcsname\relax ZZZZZZZ% \else \csname SET#1\endcsname \fi } % @ifset VAR ... @end ifset reads the `...' iff VAR has been defined % with @set. % % To get the special treatment we need for `@end ifset,' we call % \makecond and then redefine. % \makecond{ifset} \def\ifset{\parsearg{\doifset{\let\next=\ifsetfail}}} \def\doifset#1#2{% {% \makevalueexpandable \let\next=\empty \expandafter\ifx\csname SET#2\endcsname\relax #1% If not set, redefine \next. \fi \expandafter }\next } \def\ifsetfail{\doignore{ifset}} % @ifclear VAR ... @end executes the `...' iff VAR has never been % defined with @set, or has been undefined with @clear. % % The `\else' inside the `\doifset' parameter is a trick to reuse the % above code: if the variable is not set, do nothing, if it is set, % then redefine \next to \ifclearfail. % \makecond{ifclear} \def\ifclear{\parsearg{\doifset{\else \let\next=\ifclearfail}}} \def\ifclearfail{\doignore{ifclear}} % @ifcommandisdefined CMD ... @end executes the `...' if CMD (written % without the @) is in fact defined. 
We can only feasibly check at the % TeX level, so something like `mathcode' is going to considered % defined even though it is not a Texinfo command. % \makecond{ifcommanddefined} \def\ifcommanddefined{\parsearg{\doifcmddefined{\let\next=\ifcmddefinedfail}}} % \def\doifcmddefined#1#2{{% \makevalueexpandable \let\next=\empty \expandafter\ifx\csname #2\endcsname\relax #1% If not defined, \let\next as above. \fi \expandafter }\next } \def\ifcmddefinedfail{\doignore{ifcommanddefined}} % @ifcommandnotdefined CMD ... handled similar to @ifclear above. \makecond{ifcommandnotdefined} \def\ifcommandnotdefined{% \parsearg{\doifcmddefined{\else \let\next=\ifcmdnotdefinedfail}}} \def\ifcmdnotdefinedfail{\doignore{ifcommandnotdefined}} % Set the `txicommandconditionals' variable, so documents have a way to % test if the @ifcommand...defined conditionals are available. \set txicommandconditionals % @dircategory CATEGORY -- specify a category of the dir file % which this file should belong to. Ignore this in TeX. \let\dircategory=\comment % @defininfoenclose. \let\definfoenclose=\comment \message{indexing,} % Index generation facilities % Define \newwrite to be identical to plain tex's \newwrite % except not \outer, so it can be used within macros and \if's. \edef\newwrite{\makecsname{ptexnewwrite}} % \newindex {foo} defines an index named IX. % It automatically defines \IXindex such that % \IXindex ...rest of line... puts an entry in the index IX. % It also defines \IXindfile to be the number of the output channel for % the file that accumulates this index. The file's extension is IX. % The name of an index should be no more than 2 characters long % for the sake of vms. % \def\newindex#1{% \expandafter\chardef\csname#1indfile\endcsname=0 \expandafter\xdef\csname#1index\endcsname{% % Define @#1index \noexpand\doindex{#1}} } % @defindex foo == \newindex{foo} % \def\defindex{\parsearg\newindex} % Define @defcodeindex, like @defindex except put all entries in @code. 
% \def\defcodeindex{\parsearg\newcodeindex} % \def\newcodeindex#1{% \expandafter\chardef\csname#1indfile\endcsname=0 \expandafter\xdef\csname#1index\endcsname{% \noexpand\docodeindex{#1}}% } % The default indices: \newindex{cp}% concepts, \newcodeindex{fn}% functions, \newcodeindex{vr}% variables, \newcodeindex{tp}% types, \newcodeindex{ky}% keys \newcodeindex{pg}% and programs. % @synindex foo bar makes index foo feed into index bar. % Do this instead of @defindex foo if you don't want it as a separate index. % % @syncodeindex foo bar similar, but put all entries made for index foo % inside @code. % \def\synindex#1 #2 {\dosynindex\doindex{#1}{#2}} \def\syncodeindex#1 #2 {\dosynindex\docodeindex{#1}{#2}} % #1 is \doindex or \docodeindex, #2 the index getting redefined (foo), % #3 the target index (bar). \def\dosynindex#1#2#3{% \requireopenindexfile{#3}% % redefine \fooindfile: \expandafter\let\expandafter\temp\expandafter=\csname#3indfile\endcsname \expandafter\let\csname#2indfile\endcsname=\temp % redefine \fooindex: \expandafter\xdef\csname#2index\endcsname{\noexpand#1{#3}}% } % Define \doindex, the driver for all index macros. % Argument #1 is generated by the calling \fooindex macro, % and it is the two-letter name of the index. \def\doindex#1{\edef\indexname{#1}\parsearg\doindexxxx} \def\doindexxxx #1{\doind{\indexname}{#1}} % like the previous two, but they put @code around the argument. \def\docodeindex#1{\edef\indexname{#1}\parsearg\docodeindexxxx} \def\docodeindexxxx #1{\docind{\indexname}{#1}} % Used for the aux, toc and index files to prevent expansion of Texinfo % commands. % \def\atdummies{% \definedummyletter\@% \definedummyletter\ % \definedummyletter\{% \definedummyletter\}% \definedummyletter\&% % % Do the redefinitions. \definedummies \otherbackslash } % \definedummyword defines \#1 as \string\#1\space, thus effectively % preventing its expansion. 
This is used only for control words, % not control letters, because the \space would be incorrect for % control characters, but is needed to separate the control word % from whatever follows. % % These can be used both for control words that take an argument and % those that do not. If it is followed by {arg} in the input, then % that will dutifully get written to the index (or wherever). % % For control letters, we have \definedummyletter, which omits the % space. % \def\definedummyword #1{\def#1{\string#1\space}}% \def\definedummyletter#1{\def#1{\string#1}}% \let\definedummyaccent\definedummyletter % Called from \atdummies to prevent the expansion of commands. % \def\definedummies{% % \let\commondummyword\definedummyword \let\commondummyletter\definedummyletter \let\commondummyaccent\definedummyaccent \commondummiesnofonts % \definedummyletter\_% \definedummyletter\-% % % Non-English letters. \definedummyword\AA \definedummyword\AE \definedummyword\DH \definedummyword\L \definedummyword\O \definedummyword\OE \definedummyword\TH \definedummyword\aa \definedummyword\ae \definedummyword\dh \definedummyword\exclamdown \definedummyword\l \definedummyword\o \definedummyword\oe \definedummyword\ordf \definedummyword\ordm \definedummyword\questiondown \definedummyword\ss \definedummyword\th % % Although these internal commands shouldn't show up, sometimes they do. \definedummyword\bf \definedummyword\gtr \definedummyword\hat \definedummyword\less \definedummyword\sf \definedummyword\sl \definedummyword\tclose \definedummyword\tt % \definedummyword\LaTeX \definedummyword\TeX % % Assorted special characters. 
\definedummyword\ampchar \definedummyword\atchar \definedummyword\arrow \definedummyword\backslashchar \definedummyword\bullet \definedummyword\comma \definedummyword\copyright \definedummyword\registeredsymbol \definedummyword\dots \definedummyword\enddots \definedummyword\entrybreak \definedummyword\equiv \definedummyword\error \definedummyword\euro \definedummyword\expansion \definedummyword\geq \definedummyword\guillemetleft \definedummyword\guillemetright \definedummyword\guilsinglleft \definedummyword\guilsinglright \definedummyword\lbracechar \definedummyword\leq \definedummyword\mathopsup \definedummyword\minus \definedummyword\ogonek \definedummyword\pounds \definedummyword\point \definedummyword\print \definedummyword\quotedblbase \definedummyword\quotedblleft \definedummyword\quotedblright \definedummyword\quoteleft \definedummyword\quoteright \definedummyword\quotesinglbase \definedummyword\rbracechar \definedummyword\result \definedummyword\sub \definedummyword\sup \definedummyword\textdegree % \definedummyword\subentry % % We want to disable all macros so that they are not expanded by \write. \macrolist \let\value\dummyvalue % \normalturnoffactive } % \commondummiesnofonts: common to \definedummies and \indexnofonts. % Define \commondummyletter, \commondummyaccent and \commondummyword before % using. Used for accents, font commands, and various control letters. % \def\commondummiesnofonts{% % Control letters and accents. 
\commondummyletter\!% \commondummyaccent\"% \commondummyaccent\'% \commondummyletter\*% \commondummyaccent\,% \commondummyletter\.% \commondummyletter\/% \commondummyletter\:% \commondummyaccent\=% \commondummyletter\?% \commondummyaccent\^% \commondummyaccent\`% \commondummyaccent\~% \commondummyword\u \commondummyword\v \commondummyword\H \commondummyword\dotaccent \commondummyword\ogonek \commondummyword\ringaccent \commondummyword\tieaccent \commondummyword\ubaraccent \commondummyword\udotaccent \commondummyword\dotless % % Texinfo font commands. \commondummyword\b \commondummyword\i \commondummyword\r \commondummyword\sansserif \commondummyword\sc \commondummyword\slanted \commondummyword\t % % Commands that take arguments. \commondummyword\abbr \commondummyword\acronym \commondummyword\anchor \commondummyword\cite \commondummyword\code \commondummyword\command \commondummyword\dfn \commondummyword\dmn \commondummyword\email \commondummyword\emph \commondummyword\env \commondummyword\file \commondummyword\image \commondummyword\indicateurl \commondummyword\inforef \commondummyword\kbd \commondummyword\key \commondummyword\math \commondummyword\option \commondummyword\pxref \commondummyword\ref \commondummyword\samp \commondummyword\strong \commondummyword\tie \commondummyword\U \commondummyword\uref \commondummyword\url \commondummyword\var \commondummyword\verb \commondummyword\w \commondummyword\xref } \let\indexlbrace\relax \let\indexrbrace\relax \let\indexatchar\relax \let\indexbackslash\relax {\catcode`\@=0 \catcode`\\=13 @gdef@backslashdisappear{@def\{}} } { \catcode`\<=13 \catcode`\-=13 \catcode`\`=13 \gdef\indexnonalnumdisappear{% \expandafter\ifx\csname SETtxiindexlquoteignore\endcsname\relax\else % @set txiindexlquoteignore makes us ignore left quotes in the sort term. % (Introduced for FSFS 2nd ed.) 
\let`=\empty \fi % \expandafter\ifx\csname SETtxiindexbackslashignore\endcsname\relax\else \backslashdisappear \fi % \expandafter\ifx\csname SETtxiindexhyphenignore\endcsname\relax\else \def-{}% \fi \expandafter\ifx\csname SETtxiindexlessthanignore\endcsname\relax\else \def<{}% \fi \expandafter\ifx\csname SETtxiindexatsignignore\endcsname\relax\else \def\@{}% \fi } \gdef\indexnonalnumreappear{% \let-\normaldash \let<\normalless } } % \indexnofonts is used when outputting the strings to sort the index % by, and when constructing control sequence names. It eliminates all % control sequences and just writes whatever the best ASCII sort string % would be for a given command (usually its argument). % \def\indexnofonts{% % Accent commands should become @asis. \def\commondummyaccent##1{\let##1\asis}% % We can just ignore other control letters. \def\commondummyletter##1{\let##1\empty}% % All control words become @asis by default; overrides below. \let\commondummyword\commondummyaccent \commondummiesnofonts % % Don't no-op \tt, since it isn't a user-level command % and is used in the definitions of the active chars like <, >, |, etc. % Likewise with the other plain tex font commands. %\let\tt=\asis % \def\ { }% \def\@{@}% \def\_{\normalunderscore}% \def\-{}% @- shouldn't affect sorting % \uccode`\1=`\{ \uppercase{\def\{{1}}% \uccode`\1=`\} \uppercase{\def\}{1}}% \let\lbracechar\{% \let\rbracechar\}% % % \let\do\indexnofontsdef % % Non-English letters. \do\AA{AA}% \do\AE{AE}% \do\DH{DZZ}% \do\L{L}% \do\OE{OE}% \do\O{O}% \do\TH{TH}% \do\aa{aa}% \do\ae{ae}% \do\dh{dzz}% \do\exclamdown{!}% \do\l{l}% \do\oe{oe}% \do\ordf{a}% \do\ordm{o}% \do\o{o}% \do\questiondown{?}% \do\ss{ss}% \do\th{th}% % \do\LaTeX{LaTeX}% \do\TeX{TeX}% % % Assorted special characters. 
\do\atchar{@}% \do\arrow{->}% \do\bullet{bullet}% \do\comma{,}% \do\copyright{copyright}% \do\dots{...}% \do\enddots{...}% \do\equiv{==}% \do\error{error}% \do\euro{euro}% \do\expansion{==>}% \do\geq{>=}% \do\guillemetleft{<<}% \do\guillemetright{>>}% \do\guilsinglleft{<}% \do\guilsinglright{>}% \do\leq{<=}% \do\lbracechar{\{}% \do\minus{-}% \do\point{.}% \do\pounds{pounds}% \do\print{-|}% \do\quotedblbase{"}% \do\quotedblleft{"}% \do\quotedblright{"}% \do\quoteleft{`}% \do\quoteright{'}% \do\quotesinglbase{,}% \do\rbracechar{\}}% \do\registeredsymbol{R}% \do\result{=>}% \do\textdegree{o}% % % We need to get rid of all macros, leaving only the arguments (if present). % Of course this is not nearly correct, but it is the best we can do for now. % makeinfo does not expand macros in the argument to @deffn, which ends up % writing an index entry, and texindex isn't prepared for an index sort entry % that starts with \. % % Since macro invocations are followed by braces, we can just redefine them % to take a single TeX argument. The case of a macro invocation that % goes to end-of-line is not handled. % \macrolist \let\value\indexnofontsvalue } % Give the control sequence a definition that removes the {} that follows % its use, e.g. @AA{} -> AA \def\indexnofontsdef#1#2{\def#1##1{#2}}% % #1 is the index name, #2 is the entry text. \def\doind#1#2{% \iflinks {% % \requireopenindexfile{#1}% \edef\writeto{\csname#1indfile\endcsname}% % \def\indextext{#2}% \safewhatsit\doindwrite }% \fi } % Same as \doind, but for code indices \def\docind#1#2{% \iflinks {% % \requireopenindexfile{#1}% \edef\writeto{\csname#1indfile\endcsname}% % \def\indextext{#2}% \safewhatsit\docindwrite }% \fi } % Check if an index file has been opened, and if not, open it. 
% Check if an index file has been opened, and if not, open it.
% #1 is the index name (e.g. "cp"); \csname #1indfile\endcsname is the
% associated write stream, 0 until \newwrite allocates it here.
\def\requireopenindexfile#1{%
\ifnum\csname #1indfile\endcsname=0
  \expandafter\newwrite \csname#1indfile\endcsname
  \edef\suffix{#1}%
  % A .fls suffix would conflict with the file extension for the output
  % of -recorder, so use .f1s instead.
  \ifx\suffix\indexisfl\def\suffix{f1}\fi
  % Open the file
  \immediate\openout\csname#1indfile\endcsname \jobname.\suffix
  % Using \immediate above here prevents an object entering into the current
  % box, which could confound checks such as those in \safewhatsit for
  % preceding skips.
  \typeout{Writing index file \jobname.\suffix}%
\fi}
\def\indexisfl{fl}

% Definition for writing index entry sort key.
% ("-" is made active inside this group so \indexnonalnumreappear can
% restore its ordinary meaning within the @sortas argument.)
{
\catcode`\-=13
\gdef\indexwritesortas{%
  \begingroup
  \indexnonalnumreappear
  \indexwritesortasxxx}
\gdef\indexwritesortasxxx#1{%
  \xdef\indexsortkey{#1}\endgroup}
}

% Record a "see also" / "see" cross-reference to be written in place of
% the page number.
\def\indexwriteseealso#1{
  \gdef\pagenumbertext{\string\seealso{#1}}%
}
\def\indexwriteseeentry#1{
  \gdef\pagenumbertext{\string\seeentry{#1}}%
}

% The default definitions
\def\sortas#1{}%
\def\seealso#1{\i{\putwordSeeAlso}\ #1}% for sorted index file only
\def\putwordSeeAlso{See also}
\def\seeentry#1{\i{\putwordSee}\ #1}% for sorted index file only


% Given index entry text like "aaa @subentry bbb @sortas{ZZZ}":
% * Set \bracedtext to "{aaa}{bbb}"
% * Set \fullindexsortkey to "aaa @subentry ZZZ"
% * If @seealso occurs, set \pagenumbertext
%
\def\splitindexentry#1{%
  \gdef\fullindexsortkey{}%
  \xdef\bracedtext{}%
  \def\sep{}%
  \def\seealso##1{}%
  \def\seeentry##1{}%
  \expandafter\doindexsegment#1\subentry\finish\subentry
}

% append the results from the next segment
% (called repeatedly; recursion ends when the \finish sentinel appended
% by \splitindexentry is reached)
\def\doindexsegment#1\subentry{%
\def\segment{#1}%
\ifx\segment\isfinish
\else
  %
  % Fully expand the segment, throwing away any @sortas directives, and
  % trim spaces.
  \edef\trimmed{\segment}%
  \edef\trimmed{\expandafter\eatspaces\expandafter{\trimmed}}%
  \ifincodeindex
    \edef\trimmed{\noexpand\code{\trimmed}}%
  \fi
  %
  \xdef\bracedtext{\bracedtext{\trimmed}}%
  %
  % Get the string to sort by.  Process the segment with all
  % font commands turned off.
  \bgroup
    \let\sortas\indexwritesortas
    \let\seealso\indexwriteseealso
    \let\seeentry\indexwriteseeentry
    \indexnofonts
    % The braces around the commands are recognized by texindex.
    \def\lbracechar{{\string\indexlbrace}}%
    \def\rbracechar{{\string\indexrbrace}}%
    \let\{=\lbracechar
    \let\}=\rbracechar
    \def\@{{\string\indexatchar}}%
    \def\atchar##1{\@}%
    \def\backslashchar{{\string\indexbackslash}}%
    \uccode`\~=`\\ \uppercase{\let~\backslashchar}%
    %
    \let\indexsortkey\empty
    \global\let\pagenumbertext\empty
    % Execute the segment and throw away the typeset output.  This executes
    % any @sortas or @seealso commands in this segment.
    \setbox\dummybox = \hbox{\segment}%
    \ifx\indexsortkey\empty{%
      \indexnonalnumdisappear
      \xdef\trimmed{\segment}%
      \xdef\trimmed{\expandafter\eatspaces\expandafter{\trimmed}}%
      \xdef\indexsortkey{\trimmed}%
      \ifx\indexsortkey\empty\xdef\indexsortkey{ }\fi
    }\fi
    %
    % Append to \fullindexsortkey.
    \edef\tmp{\gdef\noexpand\fullindexsortkey{%
      \fullindexsortkey\sep\indexsortkey}}%
    \tmp
  \egroup
  \def\sep{\subentry}%
  %
  \expandafter\doindexsegment
\fi}
\def\isfinish{\finish}%
\newbox\dummybox % used above

\let\subentry\relax

% Use \ instead of @ in index files.  To support old texi2dvi and texindex.
% This works without changing the escape character used in the toc or aux
% files because the index entries are fully expanded here, and \string uses
% the current value of \escapechar.
\def\escapeisbackslash{\escapechar=`\\}

% Use \ in index files by default.  texi2dvi didn't support @ as the escape
% character (as it checked for "\entry" in the files, and not "@entry").  When
% the new version of texi2dvi has had a chance to become more prevalent, then
% the escape character can change back to @ again.  This should be an easy
% change to make now because both @ and \ are only used as escape characters in
% index files, never standing for themselves.
% \set txiindexescapeisbackslash % Write the entry in \indextext to the index file. % \newif\ifincodeindex \def\doindwrite{\incodeindexfalse\doindwritex} \def\docindwrite{\incodeindextrue\doindwritex} \def\doindwritex{% \maybemarginindex % \atdummies % \expandafter\ifx\csname SETtxiindexescapeisbackslash\endcsname\relax\else \escapeisbackslash \fi % % For texindex which always views { and } as separators. \def\{{\lbracechar{}}% \def\}{\rbracechar{}}% \uccode`\~=`\\ \uppercase{\def~{\backslashchar{}}}% % % Split the entry into primary entry and any subentries, and get the index % sort key. \splitindexentry\indextext % % Set up the complete index entry, with both the sort key and % the original text, including any font commands. We write % three arguments to \entry to the .?? file (four in the % subentry case), texindex reduces to two when writing the .??s % sorted result. % \edef\temp{% \write\writeto{% \string\entry{\fullindexsortkey}% {\ifx\pagenumbertext\empty\noexpand\folio\else\pagenumbertext\fi}% \bracedtext}% }% \temp } % Put the index entry in the margin if desired (undocumented). \def\maybemarginindex{% \ifx\SETmarginindex\relax\else \insert\margin{\hbox{\vrule height8pt depth3pt width0pt \relax\indextext}}% \fi } \let\SETmarginindex=\relax % Take care of unwanted page breaks/skips around a whatsit: % % If a skip is the last thing on the list now, preserve it % by backing up by \lastskip, doing the \write, then inserting % the skip again. Otherwise, the whatsit generated by the % \write or \pdfdest will make \lastskip zero. The result is that % sequences like this: % @end defun % @tindex whatever % @defun ... % will have extra space inserted, because the \medbreak in the % start of the @defun won't see the skip inserted by the @end of % the previous defun. % % But don't do any of this if we're not in vertical mode. We % don't want to do a \vskip and prematurely end a paragraph. % % Avoid page breaks due to these extra skips, too. 
%
% But wait, there is a catch there:
% We'll have to check whether \lastskip is zero skip.  \ifdim is not
% sufficient for this purpose, as it ignores stretch and shrink parts
% of the skip.  The only way seems to be to check the textual
% representation of the skip.
%
% The following is almost like \def\zeroskipmacro{0.0pt} except that
% the ``p'' and ``t'' characters have catcode \other, not 11 (letter).
%
\edef\zeroskipmacro{\expandafter\the\csname z@skip\endcsname}
%
\newskip\whatsitskip
\newcount\whatsitpenalty
%
% ..., ready, GO:
%
% #1 is the material (typically a \write) to surround safely.
\def\safewhatsit#1{\ifhmode
  #1%
\else
  % \lastskip and \lastpenalty cannot both be nonzero simultaneously.
  \whatsitskip = \lastskip
  \edef\lastskipmacro{\the\lastskip}%
  \whatsitpenalty = \lastpenalty
  %
  % If \lastskip is nonzero, that means the last item was a
  % skip.  And since a skip is discardable, that means this
  % -\whatsitskip glue we're inserting is preceded by a
  % non-discardable item, therefore it is not a potential
  % breakpoint, therefore no \nobreak needed.
  \ifx\lastskipmacro\zeroskipmacro
  \else
    \vskip-\whatsitskip
  \fi
  %
  #1%
  %
  \ifx\lastskipmacro\zeroskipmacro
    % If \lastskip was zero, perhaps the last item was a penalty, and
    % perhaps it was >=10000, e.g., a \nobreak.  In that case, we want
    % to re-insert the same penalty (values >10000 are used for various
    % signals); since we just inserted a non-discardable item, any
    % following glue (such as a \parskip) would be a breakpoint.  For example:
    %   @deffn deffn-whatever
    %   @vindex index-whatever
    %   Description.
    % would allow a break between the index-whatever whatsit
    % and the "Description." paragraph.
    \ifnum\whatsitpenalty>9999 \penalty\whatsitpenalty \fi
  \else
    % On the other hand, if we had a nonzero \lastskip,
    % this make-up glue would be preceded by a non-discardable item
    % (the whatsit from the \write), so we must insert a \nobreak.
    \nobreak\vskip\whatsitskip
  \fi
\fi}

% The index entry written in the file actually looks like
%  \entry {sortstring}{page}{topic}
% or
%  \entry {sortstring}{page}{topic}{subtopic}
% The texindex program reads in these files and writes files
% containing these kinds of lines:
%  \initial {c}
%     before the first topic whose initial is c
%  \entry {topic}{pagelist}
%     for a topic that is used without subtopics
%  \primary {topic}
%  \entry {topic}{}
%     for the beginning of a topic that is used with subtopics
%  \secondary {subtopic}{pagelist}
%     for each subtopic.
%  \secondary {subtopic}{}
%     for a subtopic with sub-subtopics
%  \tertiary {subtopic}{subsubtopic}{pagelist}
%     for each sub-subtopic.

% Define the user-accessible indexing commands
% @findex, @vindex, @kindex, @cindex.

\def\findex {\fnindex}
\def\kindex {\kyindex}
\def\cindex {\cpindex}
\def\vindex {\vrindex}
\def\tindex {\tpindex}
\def\pindex {\pgindex}

% Define the macros used in formatting output of the sorted index material.

% @printindex causes a particular index (the ??s file) to get printed.
% It does not print any chapter heading (usually an @unnumbered).
%
\parseargdef\printindex{\begingroup
  \dobreak \chapheadingskip{10000}%
  %
  \smallfonts \rm
  \tolerance = 9500
  \plainfrenchspacing
  \everypar = {}% don't want the \kern\-parindent from indentation suppression.
  %
  % See comment in \requireopenindexfile.
  \def\indexname{#1}\ifx\indexname\indexisfl\def\indexname{f1}\fi
  %
  % See if the index file exists and is nonempty.
  \openin 1 \jobname.\indexname s
  \ifeof 1
    % \enddoublecolumns gets confused if there is no text in the index,
    % and it loses the chapter title and the aux file entries for the
    % index.  The easiest way to prevent this problem is to make sure
    % there is some text.
    \putwordIndexNonexistent
    \typeout{No file \jobname.\indexname s.}%
  \else
    % If the index file exists but is empty, then \openin leaves \ifeof
    % false.  We have to make TeX try to read something from the file, so
    % it can discover if there is anything in it.
    \read 1 to \thisline
    \ifeof 1
      \putwordIndexIsEmpty
    \else
      \expandafter\printindexzz\thisline\relax\relax\finish%
    \fi
  \fi
  \closein 1
\endgroup}

% If the index file starts with a backslash, forgo reading the index
% file altogether.  If somebody upgrades texinfo.tex they may still have
% old index files using \ as the escape character.  Reading this would
% at best lead to typesetting garbage, at worst a TeX syntax error.
% #1 is the first token of the first line read from the sorted index file;
% #2 swallows the rest up to the \finish sentinel.
\def\printindexzz#1#2\finish{%
  \expandafter\ifx\csname SETtxiindexescapeisbackslash\endcsname\relax
    \uccode`\~=`\\ \uppercase{\if\noexpand~}\noexpand#1
      \expandafter\ifx\csname SETtxiskipindexfileswithbackslash\endcsname\relax
\errmessage{%
ERROR: A sorted index file in an obsolete format was skipped.
To fix this problem, please upgrade your version of 'texi2dvi'
or 'texi2pdf' to that at .
If you are using an old version of 'texindex' (part of the Texinfo
distribution), you may also need to upgrade to a newer version (at least 6.0).
You may be able to typeset the index if you run
'texindex \jobname.\indexname' yourself.
You could also try setting the 'txiindexescapeisbackslash' flag by
running a command like
'texi2dvi -t "@set txiindexescapeisbackslash" \jobname.texi'.  If you do
this, Texinfo will try to use index files in the old format.
If you continue to have problems, deleting the index files and starting again
might help (with 'rm \jobname.?? \jobname.??s')%
}%
      \else
        (Skipped sorted index file in obsolete format)
      \fi
    \else
      \begindoublecolumns
      \input \jobname.\indexname s
      \enddoublecolumns
    \fi
  \else
    \begindoublecolumns
    \catcode`\\=0\relax
    %
    % Make @ an escape character to give macros a chance to work.  This
    % should work because we (hopefully) don't otherwise use @ in index files.
    %\catcode`\@=12\relax
    \catcode`\@=0\relax
    \input \jobname.\indexname s
    \enddoublecolumns
  \fi
}

% These macros are used by the sorted index file itself.
% Change them to control the appearance of the index.
{\catcode`\/=13 \catcode`\-=13 \catcode`\^=13 \catcode`\~=13 \catcode`\_=13
\catcode`\|=13 \catcode`\<=13 \catcode`\>=13 \catcode`\+=13 \catcode`\"=13
\catcode`\$=3
\gdef\initialglyphs{%
  % special control sequences used in the index sort key
  \let\indexlbrace\{%
  \let\indexrbrace\}%
  \let\indexatchar\@%
  \def\indexbackslash{\math{\backslash}}%
  %
  % Some changes for non-alphabetic characters.  Using the glyphs from the
  % math fonts looks more consistent than the typewriter font used elsewhere
  % for these characters.
  \uccode`\~=`\\ \uppercase{\def~{\math{\backslash}}}
  %
  % In case @\ is used for backslash
  \uppercase{\let\\=~}
  % Can't get bold backslash so don't use bold forward slash
  \catcode`\/=13
  \def/{{\secrmnotbold \normalslash}}%
  \def-{{\normaldash\normaldash}}% en dash `--'
  \def^{{\chapbf \normalcaret}}%
  \def~{{\chapbf \normaltilde}}%
  \def\_{%
     \leavevmode \kern.07em \vbox{\hrule width.3em height.1ex}\kern .07em }%
  \def|{$\vert$}%
  \def<{$\less$}%
  \def>{$\gtr$}%
  \def+{$\normalplus$}%
}}

% \initial{c} is written by texindex before the first entry whose topic
% starts with "c"; typeset the initial letter as a heading in the column.
\def\initial{%
  \bgroup
  \initialglyphs
  \initialx
}

\def\initialx#1{%
  % Remove any glue we may have, we'll be inserting our own.
  \removelastskip
  %
  % We like breaks before the index initials, so insert a bonus.
  % The glue before the bonus allows a little bit of space at the
  % bottom of a column to reduce an increase in inter-line spacing.
  \nobreak
  \vskip 0pt plus 5\baselineskip
  \penalty -300
  \vskip 0pt plus -5\baselineskip
  %
  % Typeset the initial.  Making this add up to a whole number of
  % baselineskips increases the chance of the dots lining up from column
  % to column.  It still won't often be perfect, because of the stretch
  % we need before each entry, but it's better.
  %
  % No shrink because it confuses \balancecolumns.
  \vskip 1.67\baselineskip plus 1\baselineskip
  \leftline{\secfonts \kern-0.05em \secbf #1}%
  % \secfonts is inside the argument of \leftline so that the change of
  % \baselineskip will not affect any glue inserted before the vbox that
  % \leftline creates.
  % Do our best not to break after the initial.
  \nobreak
  \vskip .33\baselineskip plus .1\baselineskip
  \egroup % \initialglyphs
}

\newdimen\entryrightmargin
\entryrightmargin=0pt

% \entry typesets a paragraph consisting of the text (#1), dot leaders, and
% then page number (#2) flushed to the right margin.  It is used for index
% and table of contents entries.  The paragraph is indented by \leftskip.
%
\def\entry{%
  \begingroup
    %
    % Start a new paragraph if necessary, so our assignments below can't
    % affect previous text.
    \par
    %
    % No extra space above this paragraph.
    \parskip = 0in
    %
    % When reading the text of entry, convert explicit line breaks
    % from @* into spaces.  The user might give these in long section
    % titles, for instance.
    \def\*{\unskip\space\ignorespaces}%
    \def\entrybreak{\hfil\break}% An undocumented command
    %
    % Swallow the left brace of the text (first parameter):
    \afterassignment\doentry
    \let\temp =
}
\def\entrybreak{\unskip\space\ignorespaces}%
\def\doentry{%
  % Save the text of the entry
  \global\setbox\boxA=\hbox\bgroup
  \bgroup % Instead of the swallowed brace.
  \noindent
  \aftergroup\finishentry
  % And now comes the text of the entry.
  % Not absorbing as a macro argument reduces the chance of problems
  % with catcodes occurring.
}
{\catcode`\@=11
% #1 is the page number argument (second parameter of \entry).
\gdef\finishentry#1{%
  \egroup % end box A
  \dimen@ = \wd\boxA % Length of text of entry
  \global\setbox\boxA=\hbox\bgroup
    \unhbox\boxA
    % #1 is the page number.
    %
    % Get the width of the page numbers, and only use
    % leaders if they are present.
    \global\setbox\boxB = \hbox{#1}%
    \ifdim\wd\boxB = 0pt
      \null\nobreak\hfill\ %
    \else
      %
      \null\nobreak\indexdotfill % Have leaders before the page number.
      %
      \ifpdforxetex
        \pdfgettoks#1.%
        \hskip\skip\thinshrinkable\the\toksA
      \else
        \hskip\skip\thinshrinkable #1%
      \fi
    \fi
  \egroup % end \boxA
  \ifdim\wd\boxB = 0pt
    \noindent\unhbox\boxA\par
    \nobreak
  \else\bgroup
    % We want the text of the entries to be aligned to the left, and the
    % page numbers to be aligned to the right.
    %
    \parindent = 0pt
    \advance\leftskip by 0pt plus 1fil
    \advance\leftskip by 0pt plus -1fill
    \rightskip = 0pt plus -1fil
    \advance\rightskip by 0pt plus 1fill
    % Cause last line, which could consist of page numbers on their own
    % if the list of page numbers is long, to be aligned to the right.
    \parfillskip=0pt plus -1fill
    %
    \advance\rightskip by \entryrightmargin
    % Determine how far we can stretch into the margin.
    % This allows, e.g., "Appendix H  GNU Free Documentation License" to
    % fit on one line in @letterpaper format.
    \ifdim\entryrightmargin>2.1em \dimen@i=2.1em
    \else \dimen@i=0em \fi
    \advance \parfillskip by 0pt minus 1\dimen@i
    %
    \dimen@ii = \hsize
    \advance\dimen@ii by -1\leftskip
    \advance\dimen@ii by -1\entryrightmargin
    \advance\dimen@ii by 1\dimen@i
    \ifdim\wd\boxA > \dimen@ii % If the entry doesn't fit in one line
    \ifdim\dimen@ > 0.8\dimen@ii   % due to long index text
      % Try to split the text roughly evenly.  \dimen@ will be the length of
      % the first line.
      \dimen@ = 0.7\dimen@
      \dimen@ii = \hsize
      \ifnum\dimen@>\dimen@ii
        % If the entry is too long (for example, if it needs more than
        % two lines), use all the space in the first line.
        \dimen@ = \dimen@ii
      \fi
      \advance\leftskip by 0pt plus 1fill % ragged right
      \advance \dimen@ by 1\rightskip
      \parshape = 2 0pt \dimen@ 0em \dimen@ii
      % Ideally we'd add a finite glue at the end of the first line only,
      % instead of using \parshape with explicit line lengths, but TeX
      % doesn't seem to provide a way to do such a thing.
      %
      % Indent all lines but the first one.
      \advance\leftskip by 1em
      \advance\parindent by -1em
    \fi\fi
    \indent % start paragraph
    \unhbox\boxA
    %
    % Do not prefer a separate line ending with a hyphen to fewer lines.
    \finalhyphendemerits = 0
    %
    % Word spacing - no stretch
    \spaceskip=\fontdimen2\font minus \fontdimen4\font
    %
    \linepenalty=1000 % Discourage line breaks.
    \hyphenpenalty=5000 % Discourage hyphenation.
    %
    \par % format the paragraph
  \egroup % The \vbox
  \fi
  \endgroup
}}

\newskip\thinshrinkable
\skip\thinshrinkable=.15em minus .15em

% Like plain.tex's \dotfill, except uses up at least 1 em.
% The filll stretch here overpowers both the fil and fill stretch to push
% the page number to the right.
\def\indexdotfill{\cleaders
  \hbox{$\mathsurround=0pt \mkern1.5mu.\mkern1.5mu$}\hskip 1em plus 1filll}

\def\primary #1{\line{#1\hfil}}

% Subtopics and sub-subtopics are set with increasing left indentation.
\def\secondary{\indententry{0.5cm}}
\def\tertiary{\indententry{1cm}}

% #1 is the indentation, #2 the entry text, #3 the page number(s).
\def\indententry#1#2#3{%
  \bgroup
  \leftskip=#1
  \entry{#2}{#3}%
  \egroup
}

% Define two-column mode, which we use to typeset indexes.
% Adapted from the TeXbook, page 416, which is to say,
% the manmac.tex format used to print the TeXbook itself.
\catcode`\@=11  % private names

\newbox\partialpage
\newdimen\doublecolumnhsize

\def\begindoublecolumns{\begingroup % ended by \enddoublecolumns
  % If not much space left on page, start a new page.
  \ifdim\pagetotal>0.8\vsize\vfill\eject\fi
  %
  % Grab any single-column material above us.
  \output = {%
    \savetopmark
    %
    \global\setbox\partialpage = \vbox{%
      % Unvbox the main output page.
      \unvbox\PAGE
      \kern-\topskip \kern\baselineskip
    }%
  }%
  \eject % run that output routine to set \partialpage
  %
  % Use the double-column output routine for subsequent pages.
  \output = {\doublecolumnout}%
  %
  % Change the page size parameters.  We could do this once outside this
  % routine, in each of @smallbook, @afourpaper, and the default 8.5x11
  % format, but then we repeat the same computation.  Repeating a couple
  % of assignments once per index is clearly meaningless for the
  % execution time, so we may as well do it in one place.
  %
  % First we halve the line length, less a little for the gutter between
  % the columns.  We compute the gutter based on the line length, so it
  % changes automatically with the paper format.  The magic constant
  % below is chosen so that the gutter has the same value (well, +-<1pt)
  % as it did when we hard-coded it.
  %
  % We put the result in a separate register, \doublecolumhsize, so we
  % can restore it in \pagesofar, after \hsize itself has (potentially)
  % been clobbered.
  %
  \doublecolumnhsize = \hsize
    \advance\doublecolumnhsize by -.04154\hsize
    \divide\doublecolumnhsize by 2
  \hsize = \doublecolumnhsize
  %
  % Get the available space for the double columns -- the normal
  % (undoubled) page height minus any material left over from the
  % previous page.
  \advance\vsize by -\ht\partialpage
  \vsize = 2\vsize
  %
  % For the benefit of balancing columns
  \advance\baselineskip by 0pt plus 0.5pt
}

% The double-column output routine for all double-column pages except
% the last, which is done by \balancecolumns.
%
\def\doublecolumnout{%
  %
  \savetopmark
  %
  \splittopskip=\topskip \splitmaxdepth=\maxdepth
  \dimen@ = \vsize
  \divide\dimen@ by 2
  %
  % box0 will be the left-hand column, box2 the right.
  \setbox0=\vsplit\PAGE to\dimen@ \setbox2=\vsplit\PAGE to\dimen@
  \global\advance\vsize by 2\ht\partialpage
  \onepageout\pagesofar % empty except for the first time we are called
  \unvbox\PAGE
  \penalty\outputpenalty
}
%
% Re-output the contents of the output page -- any previous material,
% followed by the two boxes we just split, in box0 and box2.
\def\pagesofar{%
  \unvbox\partialpage
  %
  \hsize = \doublecolumnhsize
  \wd0=\hsize \wd2=\hsize \hbox to\txipagewidth{\box0\hfil\box2}%
}


% Finished with double columns.
\def\enddoublecolumns{%
  % The following penalty ensures that the page builder is exercised
  % _before_ we change the output routine.  This is necessary in the
  % following situation:
  %
  % The last section of the index consists only of a single entry.
  % Before this section, \pagetotal is less than \pagegoal, so no
  % break occurs before the last section starts.  However, the last
  % section, consisting of \initial and the single \entry, does not
  % fit on the page and has to be broken off.  Without the following
  % penalty the page builder will not be exercised until \eject
  % below, and by that time we'll already have changed the output
  % routine to the \balancecolumns version, so the next-to-last
  % double-column page will be processed with \balancecolumns, which
  % is wrong:  The two columns will go to the main vertical list, with
  % the broken-off section in the recent contributions.  As soon as
  % the output routine finishes, TeX starts reconsidering the page
  % break.  The two columns and the broken-off section both fit on the
  % page, because the two columns now take up only half of the page
  % goal.  When TeX sees \eject from below which follows the final
  % section, it invokes the new output routine that we've set after
  % \balancecolumns below; \onepageout will try to fit the two columns
  % and the final section into the vbox of \txipageheight (see
  % \pagebody), causing an overfull box.
  %
  % Note that glue won't work here, because glue does not exercise the
  % page builder, unlike penalties (see The TeXbook, pp. 280-281).
  \penalty0
  %
  \output = {%
    % Split the last of the double-column material.
    \savetopmark
    \balancecolumns
  }%
  \eject % call the \output just set
  \ifdim\pagetotal=0pt
    % Having called \balancecolumns once, we do not
    % want to call it again.  Therefore, reset \output to its normal
    % definition right away.
    \global\output=\expandafter{\the\defaultoutput}
    %
    \endgroup % started in \begindoublecolumns
    % Leave the double-column material on the current page, no automatic
    % page break.
    \box\balancedcolumns
    %
    % \pagegoal was set to the doubled \vsize above, since we restarted
    % the current page.  We're now back to normal single-column
    % typesetting, so reset \pagegoal to the normal \vsize.
    \global\vsize = \txipageheight %
    \pagegoal = \txipageheight %
  \else
    % We had some left-over material.  This might happen when \doublecolumnout
    % is called in \balancecolumns.  Try again.
    \expandafter\enddoublecolumns
  \fi
}
\newbox\balancedcolumns
\setbox\balancedcolumns=\vbox{shouldnt see this}%
%
% Only called for the last of the double column material.  \doublecolumnout
% does the others.
\def\balancecolumns{%
  \setbox0 = \vbox{\unvbox\PAGE}% like \box255 but more efficient, see p.120.
  \dimen@ = \ht0
  \ifdim\dimen@<7\baselineskip
    % Don't split a short final column in two.
    \setbox2=\vbox{}%
    \global\setbox\balancedcolumns=\vbox{\pagesofar}%
  \else
    % double the leading vertical space
    \advance\dimen@ by \topskip
    \advance\dimen@ by-\baselineskip
    \divide\dimen@ by 2 % target to split to
    \dimen@ii = \dimen@
    \splittopskip = \topskip
    % Loop until left column is at least as high as the right column.
    {%
      \vbadness = 10000
      \loop
        \global\setbox3 = \copy0
        \global\setbox1 = \vsplit3 to \dimen@
      \ifdim\ht1<\ht3
        \global\advance\dimen@ by 1pt
      \repeat
    }%
    % Now the left column is in box 1, and the right column in box 3.
    %
    % Check whether the left column has come out higher than the page itself.
    % (Note that we have doubled \vsize for the double columns, so
    % the actual height of the page is 0.5\vsize).
    \ifdim2\ht1>\vsize
      % It appears that we have been called upon to balance too much material.
      % Output some of it with \doublecolumnout, leaving the rest on the page.
      \setbox\PAGE=\box0
      \doublecolumnout
    \else
      % Compare the heights of the two columns.
      \ifdim4\ht1>5\ht3
        % Column heights are too different, so don't make their bottoms
        % flush with each other.
        \setbox2=\vbox to \ht1 {\unvbox3\vfill}%
        \setbox0=\vbox to \ht1 {\unvbox1\vfill}%
      \else
        % Make column bottoms flush with each other.
        \setbox2=\vbox to\ht1{\unvbox3\unskip}%
        \setbox0=\vbox to\ht1{\unvbox1\unskip}%
      \fi
      \global\setbox\balancedcolumns=\vbox{\pagesofar}%
    \fi
  \fi
  %
}
\catcode`\@ = \other


\message{sectioning,}
% Chapters, sections, etc.

% Let's start with @part.
\outer\parseargdef\part{\partzzz{#1}}
\def\partzzz#1{%
  \chapoddpage
  \null
  \vskip.3\vsize  % move it down on the page a bit
  \begingroup
    \noindent \titlefonts\rm #1\par % the text
    \let\lastnode=\empty      % no node to associate with
    \writetocentry{part}{#1}{}% but put it in the toc
    \headingsoff              % no headline or footline on the part page
    % This outputs a mark at the end of the page that clears \thischapter
    % and \thissection, as is done in \startcontents.
    \let\pchapsepmacro\relax
    \chapmacro{}{Yomitfromtoc}{}%
    \chapoddpage
  \endgroup
}

% \unnumberedno is an oxymoron.  But we count the unnumbered
% sections so that we can refer to them unambiguously in the pdf
% outlines by their "section number".  We avoid collisions with chapter
% numbers by starting them at 10000.  (If a document ever has 10000
% chapters, we're in trouble anyway, I'm sure.)
\newcount\unnumberedno \unnumberedno = 10000
\newcount\chapno
\newcount\secno        \secno=0
\newcount\subsecno     \subsecno=0
\newcount\subsubsecno  \subsubsecno=0

% This counter is funny since it counts through charcodes of letters A, B, ...
\newcount\appendixno  \appendixno = `\@
%
% \def\appendixletter{\char\the\appendixno}
% We do the following ugly conditional instead of the above simple
% construct for the sake of pdftex, which needs the actual
% letter in the expansion, not just typeset.
%
\def\appendixletter{%
  \ifnum\appendixno=`A A%
  \else\ifnum\appendixno=`B B%
  \else\ifnum\appendixno=`C C%
  \else\ifnum\appendixno=`D D%
  \else\ifnum\appendixno=`E E%
  \else\ifnum\appendixno=`F F%
  \else\ifnum\appendixno=`G G%
  \else\ifnum\appendixno=`H H%
  \else\ifnum\appendixno=`I I%
  \else\ifnum\appendixno=`J J%
  \else\ifnum\appendixno=`K K%
  \else\ifnum\appendixno=`L L%
  \else\ifnum\appendixno=`M M%
  \else\ifnum\appendixno=`N N%
  \else\ifnum\appendixno=`O O%
  \else\ifnum\appendixno=`P P%
  \else\ifnum\appendixno=`Q Q%
  \else\ifnum\appendixno=`R R%
  \else\ifnum\appendixno=`S S%
  \else\ifnum\appendixno=`T T%
  \else\ifnum\appendixno=`U U%
  \else\ifnum\appendixno=`V V%
  \else\ifnum\appendixno=`W W%
  \else\ifnum\appendixno=`X X%
  \else\ifnum\appendixno=`Y Y%
  \else\ifnum\appendixno=`Z Z%
  % The \the is necessary, despite appearances, because \appendixletter is
  % expanded while writing the .toc file.  \char\appendixno is not
  % expandable, thus it is written literally, thus all appendixes come out
  % with the same letter (or @) in the toc without it.
  \else\char\the\appendixno
  \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi
  \fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi}

% Each @chapter defines these (using marks) as the number+name, number
% and name of the chapter.  Page headings and footings can use
% these.  @section does likewise.
\def\thischapter{} \def\thischapternum{} \def\thischaptername{}
\def\thissection{} \def\thissectionnum{} \def\thissectionname{}

\newcount\absseclevel % used to calculate proper heading level
\newcount\secbase\secbase=0 % @raisesections/@lowersections modify this count

% @raisesections: treat @section as chapter, @subsection as section, etc.
\def\raisesections{\global\advance\secbase by -1}

% @lowersections: treat @chapter as section, @section as subsection, etc.
\def\lowersections{\global\advance\secbase by 1}

% we only have subsub.
\chardef\maxseclevel = 3
%
% A numbered section within an unnumbered changes to unnumbered too.
% To achieve this, remember the "biggest" unnum. sec. we are currently in:
\chardef\unnlevel = \maxseclevel
%
% Trace whether the current chapter is an appendix or not:
% \chapheadtype is "N" or "A", unnumbered chapters are ignored.
\def\chapheadtype{N}

% Choose a heading macro
% #1 is heading type ("N"umbered, "A"ppendix, or "U"nnumbered)
% #2 is heading level (0 = chapter .. 3 = subsubsection)
% #3 is text for heading
\def\genhead#1#2#3{%
  % Compute the abs. sec. level:
  \absseclevel=#2
  \advance\absseclevel by \secbase
  % Make sure \absseclevel doesn't fall outside the range:
  \ifnum \absseclevel < 0
    \absseclevel = 0
  \else
    \ifnum \absseclevel > 3
      \absseclevel = 3
    \fi
  \fi
  % The heading type:
  \def\headtype{#1}%
  \if \headtype U%
    \ifnum \absseclevel < \unnlevel
      \chardef\unnlevel = \absseclevel
    \fi
  \else
    % Check for appendix sections:
    \ifnum \absseclevel = 0
      \edef\chapheadtype{\headtype}%
    \else
      \if \headtype A\if \chapheadtype N%
        \errmessage{@appendix... within a non-appendix chapter}%
      \fi\fi
    \fi
    % Check for numbered within unnumbered:
    \ifnum \absseclevel > \unnlevel
      \def\headtype{U}%
    \else
      \chardef\unnlevel = 3
    \fi
  \fi
  % Now print the heading:
  \if \headtype U%
    \ifcase\absseclevel
        \unnumberedzzz{#3}%
    \or \unnumberedseczzz{#3}%
    \or \unnumberedsubseczzz{#3}%
    \or \unnumberedsubsubseczzz{#3}%
    \fi
  \else
    \if \headtype A%
      \ifcase\absseclevel
          \appendixzzz{#3}%
      \or \appendixsectionzzz{#3}%
      \or \appendixsubseczzz{#3}%
      \or \appendixsubsubseczzz{#3}%
      \fi
    \else
      \ifcase\absseclevel
          \chapterzzz{#3}%
      \or \seczzz{#3}%
      \or \numberedsubseczzz{#3}%
      \or \numberedsubsubseczzz{#3}%
      \fi
    \fi
  \fi
  \suppressfirstparagraphindent
}

% an interface:
\def\numhead{\genhead N}
\def\apphead{\genhead A}
\def\unnmhead{\genhead U}

% @chapter, @appendix, @unnumbered.  Increment top-level counter, reset
% all lower-level sectioning counters to zero.
%
% Also set \chaplevelprefix, which we prepend to @float sequence numbers
% (e.g., figures), q.v.  By default (before any chapter), that is empty.
\let\chaplevelprefix = \empty
%
\outer\parseargdef\chapter{\numhead0{#1}} % normally numhead0 calls chapterzzz
\def\chapterzzz#1{%
  % section resetting is \global in case the chapter is in a group, such
  % as an @include file.
  \global\secno=0 \global\subsecno=0 \global\subsubsecno=0
    \global\advance\chapno by 1
  %
  % Used for \float.
  \gdef\chaplevelprefix{\the\chapno.}%
  \resetallfloatnos
  %
  % \putwordChapter can contain complex things in translations.
  \toks0=\expandafter{\putwordChapter}%
  \message{\the\toks0 \space \the\chapno}%
  %
  % Write the actual heading.
  \chapmacro{#1}{Ynumbered}{\the\chapno}%
  %
  % So @section and the like are numbered underneath this chapter.
  \global\let\section = \numberedsec
  \global\let\subsection = \numberedsubsec
  \global\let\subsubsection = \numberedsubsubsec
}

\outer\parseargdef\appendix{\apphead0{#1}} % normally calls appendixzzz
%
\def\appendixzzz#1{%
  \global\secno=0 \global\subsecno=0 \global\subsubsecno=0
    \global\advance\appendixno by 1
  \gdef\chaplevelprefix{\appendixletter.}%
  \resetallfloatnos
  %
  % \putwordAppendix can contain complex things in translations.
  \toks0=\expandafter{\putwordAppendix}%
  \message{\the\toks0 \space \appendixletter}%
  %
  \chapmacro{#1}{Yappendix}{\appendixletter}%
  %
  \global\let\section = \appendixsec
  \global\let\subsection = \appendixsubsec
  \global\let\subsubsection = \appendixsubsubsec
}

% normally unnmhead0 calls unnumberedzzz:
\outer\parseargdef\unnumbered{\unnmhead0{#1}}
\def\unnumberedzzz#1{%
  \global\secno=0 \global\subsecno=0 \global\subsubsecno=0
    \global\advance\unnumberedno by 1
  %
  % Since an unnumbered has no number, no prefix for figures.
  \global\let\chaplevelprefix = \empty
  \resetallfloatnos
  %
  % This used to be simply \message{#1}, but TeX fully expands the
  % argument to \message.  Therefore, if #1 contained @-commands, TeX
  % expanded them.  For example, in `@unnumbered The @cite{Book}', TeX
  % expanded @cite (which turns out to cause errors because \cite is meant
  % to be executed, not expanded).
  %
  % Anyway, we don't want the fully-expanded definition of @cite to appear
  % as a result of the \message, we just want `@cite' itself.  We use
  % \the<toks register> to achieve this: TeX expands \the<toks> only once,
  % simply yielding the contents of <toks>.  (We also do this for
  % the toc entries.)
  \toks0 = {#1}%
  \message{(\the\toks0)}%
  %
  \chapmacro{#1}{Ynothing}{\the\unnumberedno}%
  %
  \global\let\section = \unnumberedsec
  \global\let\subsection = \unnumberedsubsec
  \global\let\subsubsection = \unnumberedsubsubsec
}

% @centerchap is like @unnumbered, but the heading is centered.
\outer\parseargdef\centerchap{%
  \let\centerparametersmaybe = \centerparameters
  \unnmhead0{#1}%
  \let\centerparametersmaybe = \relax
}

% @top is like @unnumbered.
\let\top\unnumbered

% Sections.
%
\outer\parseargdef\numberedsec{\numhead1{#1}} % normally calls seczzz
\def\seczzz#1{%
  \global\subsecno=0 \global\subsubsecno=0  \global\advance\secno by 1
  \sectionheading{#1}{sec}{Ynumbered}{\the\chapno.\the\secno}%
}

% normally calls appendixsectionzzz:
\outer\parseargdef\appendixsection{\apphead1{#1}}
\def\appendixsectionzzz#1{%
  \global\subsecno=0 \global\subsubsecno=0  \global\advance\secno by 1
  \sectionheading{#1}{sec}{Yappendix}{\appendixletter.\the\secno}%
}
\let\appendixsec\appendixsection

% normally calls unnumberedseczzz:
\outer\parseargdef\unnumberedsec{\unnmhead1{#1}}
\def\unnumberedseczzz#1{%
  \global\subsecno=0 \global\subsubsecno=0  \global\advance\secno by 1
  \sectionheading{#1}{sec}{Ynothing}{\the\unnumberedno.\the\secno}%
}

% Subsections.
% % normally calls numberedsubseczzz: \outer\parseargdef\numberedsubsec{\numhead2{#1}} \def\numberedsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Ynumbered}{\the\chapno.\the\secno.\the\subsecno}% } % normally calls appendixsubseczzz: \outer\parseargdef\appendixsubsec{\apphead2{#1}} \def\appendixsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Yappendix}% {\appendixletter.\the\secno.\the\subsecno}% } % normally calls unnumberedsubseczzz: \outer\parseargdef\unnumberedsubsec{\unnmhead2{#1}} \def\unnumberedsubseczzz#1{% \global\subsubsecno=0 \global\advance\subsecno by 1 \sectionheading{#1}{subsec}{Ynothing}% {\the\unnumberedno.\the\secno.\the\subsecno}% } % Subsubsections. % % normally numberedsubsubseczzz: \outer\parseargdef\numberedsubsubsec{\numhead3{#1}} \def\numberedsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Ynumbered}% {\the\chapno.\the\secno.\the\subsecno.\the\subsubsecno}% } % normally appendixsubsubseczzz: \outer\parseargdef\appendixsubsubsec{\apphead3{#1}} \def\appendixsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Yappendix}% {\appendixletter.\the\secno.\the\subsecno.\the\subsubsecno}% } % normally unnumberedsubsubseczzz: \outer\parseargdef\unnumberedsubsubsec{\unnmhead3{#1}} \def\unnumberedsubsubseczzz#1{% \global\advance\subsubsecno by 1 \sectionheading{#1}{subsubsec}{Ynothing}% {\the\unnumberedno.\the\secno.\the\subsecno.\the\subsubsecno}% } % These macros control what the section commands do, according % to what kind of chapter we are in (ordinary, appendix, or unnumbered). % Define them by default for a numbered chapter. 
\let\section = \numberedsec \let\subsection = \numberedsubsec \let\subsubsection = \numberedsubsubsec % Define @majorheading, @heading and @subheading \def\majorheading{% {\advance\chapheadingskip by 10pt \chapbreak }% \parsearg\chapheadingzzz } \def\chapheading{\chapbreak \parsearg\chapheadingzzz} \def\chapheadingzzz#1{% \vbox{\chapfonts \raggedtitlesettings #1\par}% \nobreak\bigskip \nobreak \suppressfirstparagraphindent } % @heading, @subheading, @subsubheading. \parseargdef\heading{\sectionheading{#1}{sec}{Yomitfromtoc}{} \suppressfirstparagraphindent} \parseargdef\subheading{\sectionheading{#1}{subsec}{Yomitfromtoc}{} \suppressfirstparagraphindent} \parseargdef\subsubheading{\sectionheading{#1}{subsubsec}{Yomitfromtoc}{} \suppressfirstparagraphindent} % These macros generate a chapter, section, etc. heading only % (including whitespace, linebreaking, etc. around it), % given all the information in convenient, parsed form. % Args are the skip and penalty (usually negative) \def\dobreak#1#2{\par\ifdim\lastskip<#1\removelastskip\penalty#2\vskip#1\fi} % Parameter controlling skip before chapter headings (if needed) \newskip\chapheadingskip % Define plain chapter starts, and page on/off switching for it. \def\chapbreak{\dobreak \chapheadingskip {-4000}} % Start a new page \def\chappager{\par\vfill\supereject} % \chapoddpage - start on an odd page for a new chapter % Because \domark is called before \chapoddpage, the filler page will % get the headings for the next chapter, which is wrong. But we don't % care -- we just disable all headings on the filler page. 
\def\chapoddpage{%
  \chappager
  \ifodd\pageno \else
    \begingroup
      \headingsoff
      \null
      \chappager
    \endgroup
  \fi
}

% @setchapternewpage on/off/odd dispatches to \CHAPPAGon etc. below.
\parseargdef\setchapternewpage{\csname CHAPPAG#1\endcsname}

\def\CHAPPAGoff{%
\global\let\contentsalignmacro = \chappager
\global\let\pchapsepmacro=\chapbreak
\global\def\HEADINGSon{\HEADINGSsinglechapoff}}

\def\CHAPPAGon{%
\global\let\contentsalignmacro = \chappager
\global\let\pchapsepmacro=\chappager
\global\def\HEADINGSon{\HEADINGSsingle}}

\def\CHAPPAGodd{%
\global\let\contentsalignmacro = \chapoddpage
\global\let\pchapsepmacro=\chapoddpage
\global\def\HEADINGSon{\HEADINGSdouble}}

\CHAPPAGon

% \chapmacro - Chapter opening.
%
% #1 is the text, #2 is the section type (Ynumbered, Ynothing,
% Yappendix, Yomitfromtoc), #3 the chapter number.
% Not used for @heading series.
%
% To test against our argument.
\def\Ynothingkeyword{Ynothing}
\def\Yappendixkeyword{Yappendix}
\def\Yomitfromtockeyword{Yomitfromtoc}
%
\def\chapmacro#1#2#3{%
  \expandafter\ifx\thisenv\titlepage\else
    \checkenv{}% chapters, etc., should not start inside an environment.
  \fi
  % Insert the first mark before the heading break (see notes for \domark).
  \let\prevchapterdefs=\currentchapterdefs
  \let\prevsectiondefs=\currentsectiondefs
  \gdef\currentsectiondefs{\gdef\thissectionname{}\gdef\thissectionnum{}%
                        \gdef\thissection{}}%
  %
  % Choose the \thischapter... definitions according to the type keyword.
  \def\temptype{#2}%
  \ifx\temptype\Ynothingkeyword
    \gdef\currentchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
      \gdef\thischapter{\thischaptername}}%
  \else\ifx\temptype\Yomitfromtockeyword
    \gdef\currentchapterdefs{\gdef\thischaptername{#1}\gdef\thischapternum{}%
      \gdef\thischapter{}}%
  \else\ifx\temptype\Yappendixkeyword
    \toks0={#1}%
    \xdef\currentchapterdefs{%
      \gdef\noexpand\thischaptername{\the\toks0}%
      \gdef\noexpand\thischapternum{\appendixletter}%
      % \noexpand\putwordAppendix avoids expanding indigestible
      % commands in some of the translations.
      \gdef\noexpand\thischapter{\noexpand\putwordAppendix{}
                                 \noexpand\thischapternum:
                                 \noexpand\thischaptername}%
    }%
  \else
    \toks0={#1}%
    \xdef\currentchapterdefs{%
      \gdef\noexpand\thischaptername{\the\toks0}%
      \gdef\noexpand\thischapternum{\the\chapno}%
      % \noexpand\putwordChapter avoids expanding indigestible
      % commands in some of the translations.
      \gdef\noexpand\thischapter{\noexpand\putwordChapter{}
                                 \noexpand\thischapternum:
                                 \noexpand\thischaptername}%
    }%
  \fi\fi\fi
  %
  % Output the mark.  Pass it through \safewhatsit, to take care of
  % the preceding space.
  \safewhatsit\domark
  %
  % Insert the chapter heading break.
  \pchapsepmacro
  %
  % Now the second mark, after the heading break.  No break points
  % between here and the heading.
  \let\prevchapterdefs=\currentchapterdefs
  \let\prevsectiondefs=\currentsectiondefs
  \domark
  %
  {%
    \chapfonts \rm
    \let\footnote=\errfootnoteheading % give better error message
    %
    % Have to define \currentsection before calling \donoderef, because the
    % xref code eventually uses it.  On the other hand, it has to be called
    % after \pchapsepmacro, or the headline will change too soon.
    \gdef\currentsection{#1}%
    %
    % Only insert the separating space if we have a chapter/appendix
    % number, and don't print the unnumbered ``number''.
    \ifx\temptype\Ynothingkeyword
      \setbox0 = \hbox{}%
      \def\toctype{unnchap}%
    \else\ifx\temptype\Yomitfromtockeyword
      \setbox0 = \hbox{}% contents like unnumbered, but no toc entry
      \def\toctype{omit}%
    \else\ifx\temptype\Yappendixkeyword
      \setbox0 = \hbox{\putwordAppendix{} #3\enspace}%
      \def\toctype{app}%
    \else
      \setbox0 = \hbox{#3\enspace}%
      \def\toctype{numchap}%
    \fi\fi\fi
    %
    % Write the toc entry for this chapter.  Must come before the
    % \donoderef, because we include the current node name in the toc
    % entry, and \donoderef resets it to empty.
    \writetocentry{\toctype}{#1}{#3}%
    %
    % For pdftex, we have to write out the node definition (aka, make
    % the pdfdest) after any page break, but before the actual text has
    % been typeset.  If the destination for the pdf outline is after the
    % text, then jumping from the outline may wind up with the text not
    % being visible, for instance under high magnification.
    \donoderef{#2}%
    %
    % Typeset the actual heading.
    \nobreak % Avoid page breaks at the interline glue.
    \vbox{\raggedtitlesettings \hangindent=\wd0 \centerparametersmaybe
          \unhbox0 #1\par}%
  }%
  \nobreak\bigskip % no page break after a chapter title
  \nobreak
}

% @centerchap -- centered and unnumbered.
\let\centerparametersmaybe = \relax
\def\centerparameters{%
  \advance\rightskip by 3\rightskip
  \leftskip = \rightskip
  \parfillskip = 0pt
}

% Section titles.  These macros combine the section number parts and
% call the generic \sectionheading to do the printing.
%
\newskip\secheadingskip
\def\secheadingbreak{\dobreak \secheadingskip{-1000}}

% Subsection titles.
\newskip\subsecheadingskip
\def\subsecheadingbreak{\dobreak \subsecheadingskip{-500}}

% Subsubsection titles.
\def\subsubsecheadingskip{\subsecheadingskip}
\def\subsubsecheadingbreak{\subsecheadingbreak}


% Print any size, any type, section title.
%
% #1 is the text of the title,
% #2 is the section level (sec/subsec/subsubsec),
% #3 is the section type (Ynumbered, Ynothing, Yappendix, Yomitfromtoc),
% #4 is the section number.
%
\def\seckeyword{sec}
%
\def\sectionheading#1#2#3#4{%
  {%
    \def\sectionlevel{#2}%
    \def\temptype{#3}%
    %
    % It is ok for the @heading series commands to appear inside an
    % environment (it's been historically allowed, though the logic is
    % dubious), but not the others.
    \ifx\temptype\Yomitfromtockeyword\else
      \checkenv{}% non-@*heading should not be in an environment.
    \fi
    \let\footnote=\errfootnoteheading
    %
    % Switch to the right set of fonts.
    \csname #2fonts\endcsname \rm
    %
    % Insert first mark before the heading break (see notes for \domark).
    \let\prevsectiondefs=\currentsectiondefs
    \ifx\temptype\Ynothingkeyword
      \ifx\sectionlevel\seckeyword
        \gdef\currentsectiondefs{\gdef\thissectionname{#1}\gdef\thissectionnum{}%
                                 \gdef\thissection{\thissectionname}}%
      \fi
    \else\ifx\temptype\Yomitfromtockeyword
      % Don't redefine \thissection.
    \else\ifx\temptype\Yappendixkeyword
      \ifx\sectionlevel\seckeyword
        \toks0={#1}%
        \xdef\currentsectiondefs{%
          \gdef\noexpand\thissectionname{\the\toks0}%
          \gdef\noexpand\thissectionnum{#4}%
          % \noexpand\putwordSection avoids expanding indigestible
          % commands in some of the translations.
          \gdef\noexpand\thissection{\noexpand\putwordSection{}
                                     \noexpand\thissectionnum:
                                     \noexpand\thissectionname}%
        }%
      \fi
    \else
      \ifx\sectionlevel\seckeyword
        \toks0={#1}%
        \xdef\currentsectiondefs{%
          \gdef\noexpand\thissectionname{\the\toks0}%
          \gdef\noexpand\thissectionnum{#4}%
          % \noexpand\putwordSection avoids expanding indigestible
          % commands in some of the translations.
          \gdef\noexpand\thissection{\noexpand\putwordSection{}
                                     \noexpand\thissectionnum:
                                     \noexpand\thissectionname}%
        }%
      \fi
    \fi\fi\fi
    %
    % Go into vertical mode.  Usually we'll already be there, but we
    % don't want the following whatsit to end up in a preceding paragraph
    % if the document didn't happen to have a blank line.
    \par
    %
    % Output the mark.  Pass it through \safewhatsit, to take care of
    % the preceding space.
    \safewhatsit\domark
    %
    % Insert space above the heading.
    \csname #2headingbreak\endcsname
    %
    % Now the second mark, after the heading break.  No break points
    % between here and the heading.
    \global\let\prevsectiondefs=\currentsectiondefs
    \domark
    %
    % Only insert the space after the number if we have a section number.
    \ifx\temptype\Ynothingkeyword
      \setbox0 = \hbox{}%
      \def\toctype{unn}%
      \gdef\currentsection{#1}%
    \else\ifx\temptype\Yomitfromtockeyword
      % for @headings -- no section number, don't include in toc,
      % and don't redefine \currentsection.
      \setbox0 = \hbox{}%
      \def\toctype{omit}%
      \let\sectionlevel=\empty
    \else\ifx\temptype\Yappendixkeyword
      \setbox0 = \hbox{#4\enspace}%
      \def\toctype{app}%
      \gdef\currentsection{#1}%
    \else
      \setbox0 = \hbox{#4\enspace}%
      \def\toctype{num}%
      \gdef\currentsection{#1}%
    \fi\fi\fi
    %
    % Write the toc entry (before \donoderef).  See comments in \chapmacro.
    \writetocentry{\toctype\sectionlevel}{#1}{#4}%
    %
    % Write the node reference (= pdf destination for pdftex).
    % Again, see comments in \chapmacro.
    \donoderef{#3}%
    %
    % Interline glue will be inserted when the vbox is completed.
    % That glue will be a valid breakpoint for the page, since it'll be
    % preceded by a whatsit (usually from the \donoderef, or from the
    % \writetocentry if there was no node).  We don't want to allow that
    % break, since then the whatsits could end up on page n while the
    % section is on page n+1, thus toc/etc. are wrong.  Debian bug 276000.
    \nobreak
    %
    % Output the actual section heading.
    \vbox{\hyphenpenalty=10000 \tolerance=5000 \parindent=0pt \ptexraggedright
          \hangindent=\wd0  % zero if no section number
          \unhbox0 #1}%
  }%
  % Add extra space after the heading -- half of whatever came above it.
  % Don't allow stretch, though.
  \kern .5 \csname #2headingskip\endcsname
  %
  % Do not let the kern be a potential breakpoint, as it would be if it
  % was followed by glue.
  \nobreak
  %
  % We'll almost certainly start a paragraph next, so don't let that
  % glue accumulate.  (Not a breakpoint because it's preceded by a
  % discardable item.)  However, when a paragraph is not started next
  % (\startdefun, \cartouche, \center, etc.), this needs to be wiped out
  % or the negative glue will cause weirdly wrong output, typically
  % obscuring the section heading with something else.
  \vskip-\parskip
  %
  % This is so the last item on the main vertical list is a known
  % \penalty > 10000, so \startdefun, etc., can recognize the situation
  % and do the needful.
  \penalty 10001
}


\message{toc,}
% Table of contents.
\newwrite\tocfile

% Write an entry to the toc file, opening it if necessary.
% Called from @chapter, etc.
%
% Example usage: \writetocentry{sec}{Section Name}{\the\chapno.\the\secno}
% We append the current node name (if any) and page number as additional
% arguments for the \{chap,sec,...}entry macros which will eventually
% read this.  The node name is used in the pdf outlines as the
% destination to jump to.
%
% We open the .toc file for writing here instead of at @setfilename (or
% any other fixed time) so that @contents can be anywhere in the document.
% But if #1 is `omit', then we don't do anything.  This is used for the
% table of contents chapter openings themselves.
%
\newif\iftocfileopened
\def\omitkeyword{omit}%
%
\def\writetocentry#1#2#3{%
  \edef\writetoctype{#1}%
  \ifx\writetoctype\omitkeyword \else
    \iftocfileopened\else
      \immediate\openout\tocfile = \jobname.toc
      \global\tocfileopenedtrue
    \fi
    %
    \iflinks
      {\atdummies
       \edef\temp{%
         \write\tocfile{@#1entry{#2}{#3}{\lastnode}{\noexpand\folio}}}%
       \temp
      }%
    \fi
  \fi
  %
  % Tell \shipout to create a pdf destination on each page, if we're
  % writing pdf.  These are used in the table of contents.  We can't
  % just write one on every page because the title pages are numbered
  % 1 and 2 (the page numbers aren't printed), and so are the first
  % two pages of the document.  Thus, we'd have two destinations named
  % `1', and two named `2'.
  \ifpdforxetex
    \global\pdfmakepagedesttrue
  \fi
}


% These characters do not print properly in the Computer Modern roman
% fonts, so we must take special care.  This is more or less redundant
% with the Texinfo input format setup at the end of this file.
%
\def\activecatcodes{%
  \catcode`\"=\active
  \catcode`\$=\active
  \catcode`\<=\active
  \catcode`\>=\active
  \catcode`\\=\active
  \catcode`\^=\active
  \catcode`\_=\active
  \catcode`\|=\active
  \catcode`\~=\active
}

% Read the toc file, which is essentially Texinfo input.
\def\readtocfile{%
  \setupdatafile
  \activecatcodes
  \input \tocreadfilename
}

\newskip\contentsrightmargin  \contentsrightmargin=1in
\newcount\savepageno
\newcount\lastnegativepageno \lastnegativepageno = -1

% Prepare to read what we've written to \tocfile.
% The \begingroup opened here is closed by \endgroup in \contents or
% \summarycontents below.
%
\def\startcontents#1{%
  % If @setchapternewpage on, and @headings double, the contents should
  % start on an odd page, unlike chapters.
  \contentsalignmacro
  \immediate\closeout\tocfile
  %
  % Don't need to put `Contents' or `Short Contents' in the headline.
  % It is abundantly clear what they are.
  \chapmacro{#1}{Yomitfromtoc}{}%
  %
  \savepageno = \pageno
  \begingroup                  % Set up to handle contents files properly.
    \raggedbottom              % Worry more about breakpoints than the bottom.
    \entryrightmargin=\contentsrightmargin % Don't use the full line length.
    %
    % Roman numerals for page numbers.
    \ifnum \pageno>0 \global\pageno = \lastnegativepageno \fi
    \def\thistitle{}% no title in double-sided headings
    % Record where the Roman numerals started.
    \ifnum\romancount=0 \global\romancount=\pagecount \fi
}

% redefined for the two-volume lispref.  We always output on
% \jobname.toc even if this is redefined.
%
\def\tocreadfilename{\jobname.toc}

% Normal (long) toc.
%
\def\contents{%
  \startcontents{\putwordTOC}%
    \openin 1 \tocreadfilename\space
    \ifeof 1 \else
      \readtocfile
    \fi
    \vfill \eject
    \contentsalignmacro % in case @setchapternewpage odd is in effect
    \ifeof 1 \else
      \pdfmakeoutlines
    \fi
    \closein 1
  \endgroup
  \contentsendroman
}

% And just the chapters.
\def\summarycontents{%
  \startcontents{\putwordShortTOC}%
    %
    \let\partentry = \shortpartentry
    \let\numchapentry = \shortchapentry
    \let\appentry = \shortchapentry
    \let\unnchapentry = \shortunnchapentry
    % We want a true roman here for the page numbers.
    \secfonts
    \let\rm=\shortcontrm \let\bf=\shortcontbf
    \let\sl=\shortcontsl \let\tt=\shortconttt
    \rm
    \hyphenpenalty = 10000
    \advance\baselineskip by 1pt % Open it up a little.
    % Make all section-level entries vanish in the short toc.
    \def\numsecentry##1##2##3##4{}
    \let\appsecentry = \numsecentry
    \let\unnsecentry = \numsecentry
    \let\numsubsecentry = \numsecentry
    \let\appsubsecentry = \numsecentry
    \let\unnsubsecentry = \numsecentry
    \let\numsubsubsecentry = \numsecentry
    \let\appsubsubsecentry = \numsecentry
    \let\unnsubsubsecentry = \numsecentry
    \openin 1 \tocreadfilename\space
    \ifeof 1 \else
      \readtocfile
    \fi
    \closein 1
    \vfill \eject
    \contentsalignmacro % in case @setchapternewpage odd is in effect
  \endgroup
  \contentsendroman
}
\let\shortcontents = \summarycontents

% Get ready to use Arabic numerals again
\def\contentsendroman{%
  \lastnegativepageno = \pageno
  \global\pageno = \savepageno
  %
  % If \romancount > \arabiccount, the contents are at the end of the
  % document.  Otherwise, advance where the Arabic numerals start for
  % the page numbers.
  \ifnum\romancount>\arabiccount\else\global\arabiccount=\pagecount\fi
}

% Typeset the label for a chapter or appendix for the short contents.
% The arg is, e.g., `A' for an appendix, or `3' for a chapter.
%
\def\shortchaplabel#1{%
  % This space should be enough, since a single number is .5em, and the
  % widest letter (M) is 1em, at least in the Computer Modern fonts.
  % But use \hss just in case.
  % (This space doesn't include the extra space that gets added after
  % the label; that gets put in by \shortchapentry above.)
  %
  % We'd like to right-justify chapter numbers, but that looks strange
  % with appendix letters.  And right-justifying numbers and
  % left-justifying letters looks strange when there is less than 10
  % chapters.  Have to read the whole toc once to know how many chapters
  % there are before deciding ...
  \hbox to 1em{#1\hss}%
}

% These macros generate individual entries in the table of contents.
% The first argument is the chapter or section name.
% The last argument is the page number.
% The arguments in between are the chapter number, section number, ...

% Parts, in the main contents.  Replace the part number, which doesn't
% exist, with an empty box.  Let's hope all the numbers have the same width.
% Also ignore the page number, which is conventionally not printed.
\def\numeralbox{\setbox0=\hbox{8}\hbox to \wd0{\hfil}}
\def\partentry#1#2#3#4{%
  % Add stretch and a bonus for breaking the page before the part heading.
  % This reduces the chance of the page being broken immediately after the
  % part heading, before a following chapter heading.
  \vskip 0pt plus 5\baselineskip
  \penalty-300
  \vskip 0pt plus -5\baselineskip
  \dochapentry{\numeralbox\labelspace#1}{}%
}
%
% Parts, in the short toc.
\def\shortpartentry#1#2#3#4{%
  \penalty-300
  \vskip.5\baselineskip plus.15\baselineskip minus.1\baselineskip
  \shortchapentry{{\bf #1}}{\numeralbox}{}{}%
}

% Chapters, in the main contents.
\def\numchapentry#1#2#3#4{\dochapentry{#2\labelspace#1}{#4}}

% Chapters, in the short toc.
% See comments in \dochapentry re vbox and related settings.
\def\shortchapentry#1#2#3#4{%
  \tocentry{\shortchaplabel{#2}\labelspace #1}{\doshortpageno\bgroup#4\egroup}%
}

% Appendices, in the main contents.
% Need the word Appendix, and a fixed-size box.
%
\def\appendixbox#1{%
  % We use M since it's probably the widest letter.
  \setbox0 = \hbox{\putwordAppendix{} M}%
  \hbox to \wd0{\putwordAppendix{} #1\hss}}
%
\def\appentry#1#2#3#4{\dochapentry{\appendixbox{#2}\hskip.7em#1}{#4}}

% Unnumbered chapters.
\def\unnchapentry#1#2#3#4{\dochapentry{#1}{#4}}
\def\shortunnchapentry#1#2#3#4{\tocentry{#1}{\doshortpageno\bgroup#4\egroup}}

% Sections.
\def\numsecentry#1#2#3#4{\dosecentry{#2\labelspace#1}{#4}}
\let\appsecentry=\numsecentry
\def\unnsecentry#1#2#3#4{\dosecentry{#1}{#4}}

% Subsections.
\def\numsubsecentry#1#2#3#4{\dosubsecentry{#2\labelspace#1}{#4}}
\let\appsubsecentry=\numsubsecentry
\def\unnsubsecentry#1#2#3#4{\dosubsecentry{#1}{#4}}

% And subsubsections.
\def\numsubsubsecentry#1#2#3#4{\dosubsubsecentry{#2\labelspace#1}{#4}}
\let\appsubsubsecentry=\numsubsubsecentry
\def\unnsubsubsecentry#1#2#3#4{\dosubsubsecentry{#1}{#4}}

% This parameter controls the indentation of the various levels.
% Same as \defaultparindent.
\newdimen\tocindent \tocindent = 15pt

% Now for the actual typesetting.  In all these, #1 is the text and #2 is the
% page number.
%
% If the toc has to be broken over pages, we want it to be at chapters
% if at all possible; hence the \penalty.
\def\dochapentry#1#2{%
   \penalty-300 \vskip1\baselineskip plus.33\baselineskip minus.25\baselineskip
   \begingroup
     % Move the page numbers slightly to the right
     \advance\entryrightmargin by -0.05em
     \chapentryfonts
     \tocentry{#1}{\dopageno\bgroup#2\egroup}%
   \endgroup
   \nobreak\vskip .25\baselineskip plus.1\baselineskip
}

\def\dosecentry#1#2{\begingroup
  \secentryfonts \leftskip=\tocindent
  \tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}

\def\dosubsecentry#1#2{\begingroup
  \subsecentryfonts \leftskip=2\tocindent
  \tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}

\def\dosubsubsecentry#1#2{\begingroup
  \subsubsecentryfonts \leftskip=3\tocindent
  \tocentry{#1}{\dopageno\bgroup#2\egroup}%
\endgroup}

% We use the same \entry macro as for the index entries.
\let\tocentry = \entry

% Space between chapter (or whatever) number and the title.
\def\labelspace{\hskip1em \relax}

\def\dopageno#1{{\rm #1}}
\def\doshortpageno#1{{\rm #1}}

\def\chapentryfonts{\secfonts \rm}
\def\secentryfonts{\textfonts}
\def\subsecentryfonts{\textfonts}
\def\subsubsecentryfonts{\textfonts}


\message{environments,}
% @foo ... @end foo.

% @tex ... @end tex    escapes into raw TeX temporarily.
% One exception: @ is still an escape character, so that @end tex works.
% But \@ or @@ will get a plain @ character.
\envdef\tex{%
  \setregularquotes
  % Restore plain TeX catcodes for the special characters.
  \catcode `\\=0 \catcode `\{=1 \catcode `\}=2
  \catcode `\$=3 \catcode `\&=4 \catcode `\#=6
  \catcode `\^=7 \catcode `\_=8 \catcode `\~=\active \let~=\tie
  \catcode `\%=14
  \catcode `\+=\other \catcode `\"=\other
  \catcode `\|=\other \catcode `\<=\other \catcode `\>=\other
  \catcode `\`=\other \catcode `\'=\other
  %
  % ' is active in math mode (mathcode"8000).  So reset it, and all our
  % other math active characters (just in case), to plain's definitions.
  \mathactive
  %
  % Inverse of the list at the beginning of the file.
  \let\b=\ptexb
  \let\bullet=\ptexbullet
  \let\c=\ptexc
  \let\,=\ptexcomma
  \let\.=\ptexdot
  \let\dots=\ptexdots
  \let\equiv=\ptexequiv
  \let\!=\ptexexclam
  \let\i=\ptexi
  \let\indent=\ptexindent
  \let\noindent=\ptexnoindent
  \let\{=\ptexlbrace
  \let\+=\tabalign
  \let\}=\ptexrbrace
  \let\/=\ptexslash
  \let\sp=\ptexsp
  \let\*=\ptexstar
  %\let\sup=\ptexsup % do not redefine, we want @sup to work in math mode
  \let\t=\ptext
  \expandafter \let\csname top\endcsname=\ptextop  % we've made it outer
  \let\frenchspacing=\plainfrenchspacing
  %
  \def\endldots{\mathinner{\ldots\ldots\ldots\ldots}}%
  \def\enddots{\relax\ifmmode\endldots\else$\mathsurround=0pt \endldots\,$\fi}%
  \def\@{@}%
}
% There is no need to define \Etex.

% Define @lisp ... @end lisp.
% @lisp environment forms a group so it can rebind things,
% including the definition of @end lisp (which normally is erroneous).

% Amount to narrow the margins by for @lisp.
\newskip\lispnarrowing \lispnarrowing=0.4in

% This is the definition that ^^M gets inside @lisp, @example, and other
% such environments.  \null is better than a space, since it doesn't
% have any width.
\def\lisppar{\null\endgraf}

% This space is always present above and below environments.
\newskip\envskipamount \envskipamount = 0pt

% Make spacing above and below environment symmetrical.  We use \parskip here
% to help in doing that, since in @example-like environments \parskip
% is reset to zero; thus the \afterenvbreak inserts no space -- but the
% start of the next paragraph will insert \parskip.
%
\def\aboveenvbreak{{%
  % =10000 instead of <10000 because of a special case in \itemzzz and
  % \sectionheading, q.v.
  \ifnum \lastpenalty=10000 \else
    \advance\envskipamount by \parskip
    \endgraf
    \ifdim\lastskip<\envskipamount
      \removelastskip
      \ifnum\lastpenalty<10000
        % Penalize breaking before the environment, because preceding text
        % often leads into it.
        \penalty100
      \fi
      \vskip\envskipamount
    \fi
  \fi
}}

\def\afterenvbreak{{%
  % =10000 instead of <10000 because of a special case in \itemzzz and
  % \sectionheading, q.v.
  \ifnum \lastpenalty=10000 \else
    \advance\envskipamount by \parskip
    \endgraf
    \ifdim\lastskip<\envskipamount
      \removelastskip
      % it's not a good place to break if the last penalty was \nobreak
      % or better ...
      \ifnum\lastpenalty<10000 \penalty-50 \fi
      \vskip\envskipamount
    \fi
  \fi
}}

% \nonarrowing is a flag.  If "set", @lisp etc don't narrow margins; it will
% also clear it, so that its embedded environments do the narrowing again.
\let\nonarrowing=\relax

% @cartouche ... @end cartouche: draw rectangle w/rounded corners around
% environment contents.
%
% Corner characters come from the lcircle10 font.
\def\ctl{{\circle\char'013\hskip -6pt}}% 6pt from pl file: 1/2charwidth
\def\ctr{{\hskip 6pt\circle\char'010}}
\def\cbl{{\circle\char'012\hskip -6pt}}
\def\cbr{{\hskip 6pt\circle\char'011}}
\def\carttop{\hbox to \cartouter{\hskip\lskip
        \ctl\leaders\hrule height\circthick\hfil\ctr
        \hskip\rskip}}
\def\cartbot{\hbox to \cartouter{\hskip\lskip
        \cbl\leaders\hrule height\circthick\hfil\cbr
        \hskip\rskip}}
%
\newskip\lskip\newskip\rskip

% only require the font if @cartouche is actually used
\def\cartouchefontdefs{%
  \font\circle=lcircle10\relax
  \circthick=\fontdimen8\circle
}
\newdimen\circthick
\newdimen\cartouter\newdimen\cartinner
\newskip\normbskip\newskip\normpskip\newskip\normlskip


\envdef\cartouche{%
  \cartouchefontdefs
  \ifhmode\par\fi  % can't be in the midst of a paragraph.
  \startsavinginserts
  \lskip=\leftskip \rskip=\rightskip
  \leftskip=0pt\rightskip=0pt % we want these *outside*.
  \cartinner=\hsize \advance\cartinner by-\lskip
  \advance\cartinner by-\rskip
  \cartouter=\hsize
  \advance\cartouter by 18.4pt % allow for 3pt kerns on either
                               % side, and for 6pt waste from
                               % each corner char, and rule thickness
  \normbskip=\baselineskip \normpskip=\parskip \normlskip=\lineskip
  %
  % If this cartouche directly follows a sectioning command, we need the
  % \parskip glue (backspaced over by default) or the cartouche can
  % collide with the section heading.
  \ifnum\lastpenalty>10000 \vskip\parskip \penalty\lastpenalty \fi
  %
  % The \bgroup's opened here are matched by the \egroup's in \Ecartouche.
  \setbox\groupbox=\vbox\bgroup
      \baselineskip=0pt\parskip=0pt\lineskip=0pt
      \carttop
      \hbox\bgroup
          \hskip\lskip
          \vrule\kern3pt
          \vbox\bgroup
              \kern3pt
              \hsize=\cartinner
              \baselineskip=\normbskip
              \lineskip=\normlskip
              \parskip=\normpskip
              \vskip -\parskip
              \comment % For explanation, see the end of def\group.
}
\def\Ecartouche{%
              \ifhmode\par\fi
              \kern3pt
          \egroup
          \kern3pt\vrule
          \hskip\rskip
      \egroup
  \cartbot
  \egroup
  \addgroupbox
  \checkinserts
}


% This macro is called at the beginning of all the @example variants,
% inside a group.
\newdimen\nonfillparindent
\def\nonfillstart{%
  \aboveenvbreak
  \ifdim\hfuzz < 12pt \hfuzz = 12pt \fi % Don't be fussy
  \sepspaces % Make spaces be word-separators rather than space tokens.
  \let\par = \lisppar % don't ignore blank lines
  \obeylines % each line of input is a line of output
  \parskip = 0pt
  % Turn off paragraph indentation but redefine \indent to emulate
  % the normal \indent.
  \nonfillparindent=\parindent
  \parindent = 0pt
  \let\indent\nonfillindent
  %
  \emergencystretch = 0pt % don't try to avoid overfull boxes
  \ifx\nonarrowing\relax
    \advance \leftskip by \lispnarrowing
    \exdentamount=\lispnarrowing
  \else
    \let\nonarrowing = \relax
  \fi
  \let\exdent=\nofillexdent
}

\begingroup
\obeyspaces
% We want to swallow spaces (but not other tokens) after the fake
% @indent in our nonfill-environments, where spaces are normally
% active and set to @tie, resulting in them not being ignored after
% @indent.
\gdef\nonfillindent{\futurelet\temp\nonfillindentcheck}%
\gdef\nonfillindentcheck{%
\ifx\temp %
\expandafter\nonfillindentgobble%
\else%
\leavevmode\nonfillindentbox%
\fi%
}%
\endgroup
\def\nonfillindentgobble#1{\nonfillindent}
\def\nonfillindentbox{\hbox to \nonfillparindent{\hss}}

% If you want all examples etc. small: @set dispenvsize small.
% If you want even small examples the full size: @set dispenvsize nosmall.
% This affects the following displayed environments:
%    @example, @display, @format, @lisp, @verbatim
%
\def\smallword{small}
\def\nosmallword{nosmall}
\let\SETdispenvsize\relax
\def\setnormaldispenv{%
  \ifx\SETdispenvsize\smallword
    % end paragraph for sake of leading, in case document has no blank
    % line.  This is redundant with what happens in \aboveenvbreak, but
    % we need to do it before changing the fonts, and it's inconvenient
    % to change the fonts afterward.
    \ifnum \lastpenalty=10000 \else \endgraf \fi
    \smallexamplefonts \rm
  \fi
}
\def\setsmalldispenv{%
  \ifx\SETdispenvsize\nosmallword
  \else
    \ifnum \lastpenalty=10000 \else \endgraf \fi
    \smallexamplefonts \rm
  \fi
}

% We often define two environments, @foo and @smallfoo.
% Let's do it in one command.  #1 is the env name, #2 the definition.
\def\makedispenvdef#1#2{%
  \expandafter\envdef\csname#1\endcsname {\setnormaldispenv #2}%
  \expandafter\envdef\csname small#1\endcsname {\setsmalldispenv #2}%
  \expandafter\let\csname E#1\endcsname \afterenvbreak
  \expandafter\let\csname Esmall#1\endcsname \afterenvbreak
}

% Define two environment synonyms (#1 and #2) for an environment.
\def\maketwodispenvdef#1#2#3{%
  \makedispenvdef{#1}{#3}%
  \makedispenvdef{#2}{#3}%
}
%
% @lisp: indented, narrowed, typewriter font;
% @example: same as @lisp.
%
% @smallexample and @smalllisp: use smaller fonts.
% Originally contributed by Pavel@xerox.
%
\maketwodispenvdef{lisp}{example}{%
  \nonfillstart
  \tt\setcodequotes
  \let\kbdfont = \kbdexamplefont % Allow @kbd to do something special.
  \parsearg\gobble
}

% @display/@smalldisplay: same as @lisp except keep current font.
%
\makedispenvdef{display}{%
  \nonfillstart
  \gobble
}

% @format/@smallformat: same as @display except don't narrow margins.
%
\makedispenvdef{format}{%
  \let\nonarrowing = t%
  \nonfillstart
  \gobble
}

% @flushleft: same as @format, but doesn't obey \SETdispenvsize.
\envdef\flushleft{%
  \let\nonarrowing = t%
  \nonfillstart
  \gobble
}
\let\Eflushleft = \afterenvbreak

% @flushright.
%
\envdef\flushright{%
  \let\nonarrowing = t%
  \nonfillstart
  \advance\leftskip by 0pt plus 1fill\relax
  \gobble
}
\let\Eflushright = \afterenvbreak

% @raggedright does more-or-less normal line breaking but no right
% justification.  From plain.tex.
\envdef\raggedright{%
  \rightskip0pt plus2.4em \spaceskip.3333em \xspaceskip.5em\relax
}
\let\Eraggedright\par

\envdef\raggedleft{%
  \parindent=0pt \leftskip0pt plus2em
  \spaceskip.3333em \xspaceskip.5em \parfillskip=0pt
  \hbadness=10000 % Last line will usually be underfull, so turn off
                  % badness reporting.
}
\let\Eraggedleft\par

\envdef\raggedcenter{%
  \parindent=0pt \rightskip0pt plus1em \leftskip0pt plus1em
  \spaceskip.3333em \xspaceskip.5em \parfillskip=0pt
  \hbadness=10000 % Last line will usually be underfull, so turn off
                  % badness reporting.
}
\let\Eraggedcenter\par


% @quotation does normal linebreaking (hence we can't use \nonfillstart)
% and narrows the margins.  We keep \parskip nonzero in general, since
% we're doing normal filling.  So, when using \aboveenvbreak and
% \afterenvbreak, temporarily make \parskip 0.
%
\makedispenvdef{quotation}{\quotationstart}
%
\def\quotationstart{%
  \indentedblockstart % same as \indentedblock, but increase right margin too.
  \ifx\nonarrowing\relax
    \advance\rightskip by \lispnarrowing
  \fi
  \parsearg\quotationlabel
}

% We have retained a nonzero parskip for the environment, since we're
% doing normal filling.
%
\def\Equotation{%
  \par
  \ifx\quotationauthor\thisisundefined\else
    % indent a bit.
    \leftline{\kern 2\leftskip \sl ---\quotationauthor}%
  \fi
  {\parskip=0pt \afterenvbreak}%
}
\def\Esmallquotation{\Equotation}

% If we're given an argument, typeset it in bold with a colon after.
\def\quotationlabel#1{%
  \def\temp{#1}%
  \ifx\temp\empty \else
    {\bf #1: }%
  \fi
}

% @indentedblock is like @quotation, but indents only on the left and
% has no optional argument.
%
\makedispenvdef{indentedblock}{\indentedblockstart}
%
\def\indentedblockstart{%
  {\parskip=0pt \aboveenvbreak}% because \aboveenvbreak inserts \parskip
  \parindent=0pt
  %
  % @cartouche defines \nonarrowing to inhibit narrowing at next level down.
  \ifx\nonarrowing\relax
    \advance\leftskip by \lispnarrowing
    \exdentamount = \lispnarrowing
  \else
    \let\nonarrowing = \relax
  \fi
}

% Keep a nonzero parskip for the environment, since we're doing normal filling.
%
\def\Eindentedblock{%
  \par
  {\parskip=0pt \afterenvbreak}%
}
\def\Esmallindentedblock{\Eindentedblock}

% LaTeX-like @verbatim...@end verbatim and @verb{<char>...<char>}
% If we want to allow any <char> as delimiter,
% we need the curly braces so that makeinfo sees the @verb command, eg:
% `@verbx...x' would look like the '@verbx' command.  --janneke@gnu.org
%
% [Knuth]: Donald Ervin Knuth, 1996.  The TeXbook.
%
% [Knuth] p.344; only we need to do the other characters Texinfo sets
% active too.  Otherwise, they get lost as the first character on a
% verbatim line.
\def\dospecials{%
  \do\ \do\\\do\{\do\}\do\$\do\&%
  \do\#\do\^\do\^^K\do\_\do\^^A\do\%\do\~%
  \do\<\do\>\do\|\do\@\do+\do\"%
  % Don't do the quotes -- if we do, @set txicodequoteundirected and
  % @set txicodequotebacktick will not have effect on @verb and
  % @verbatim, and ?` and !` ligatures won't get disabled.
  %\do\`\do\'%
}
%
% [Knuth] p. 380
\def\uncatcodespecials{%
  \def\do##1{\catcode`##1=\other}\dospecials}
%
% Setup for the @verb command.
%
% Eight spaces for a tab
\begingroup
  \catcode`\^^I=\active
  \gdef\tabeightspaces{\catcode`\^^I=\active\def^^I{\ \ \ \ \ \ \ \ }}
\endgroup
%
\def\setupverb{%
  \tt  % easiest (and conventionally used) font for verbatim
  \def\par{\leavevmode\endgraf}%
  \setcodequotes
  \tabeightspaces
  % Respect line breaks,
  % print special symbols as themselves, and
  % make each space count
  % must do in this order:
  \obeylines \uncatcodespecials \sepspaces
}

% Setup for the @verbatim environment
%
% Real tab expansion.
\newdimen\tabw \setbox0=\hbox{\tt\space} \tabw=8\wd0 % tab amount
%
% We typeset each line of the verbatim in an \hbox, so we can handle
% tabs.
% Box that accumulates one segment of a verbatim line (up to the next tab).
\newbox\verbbox
\def\starttabbox{\setbox\verbbox=\hbox\bgroup}
%
\begingroup
  \catcode`\^^I=\active
  \gdef\tabexpand{%
    \catcode`\^^I=\active
    \def^^I{\leavevmode\egroup
      \dimen\verbbox=\wd\verbbox % the width so far, or since the previous tab
      \divide\dimen\verbbox by\tabw
      \multiply\dimen\verbbox by\tabw % compute previous multiple of \tabw
      \advance\dimen\verbbox by\tabw  % advance to next multiple of \tabw
      \wd\verbbox=\dimen\verbbox
      \leavevmode\box\verbbox \starttabbox
    }%
  }
\endgroup

% start the verbatim environment.
\def\setupverbatim{%
  \let\nonarrowing = t%
  \nonfillstart
  \tt % easiest (and conventionally used) font for verbatim
  \def\par{\egroup\leavevmode\box\verbbox\endgraf\starttabbox}%
  \tabexpand
  \setcodequotes
  % Respect line breaks,
  % print special symbols as themselves, and
  % make each space count.
  % Must do in this order:
  \obeylines \uncatcodespecials \sepspaces
}

% Do the @verb magic: verbatim text is quoted by unique
% delimiter characters.  Before first delimiter expect a
% right brace, after last delimiter expect closing brace:
%
%    \def\doverb'{'<char>#1<char>'}'{#1}
%
% [Knuth] p. 382; only eat outer {}
\begingroup
  \catcode`[=1\catcode`]=2\catcode`\{=\other\catcode`\}=\other
  \gdef\doverb{#1[\def\next##1#1}[##1\endgroup]\next]
\endgroup
%
\def\verb{\begingroup\setupverb\doverb}
%
%
% Do the @verbatim magic: define the macro \doverbatim so that
% the (first) argument ends when '@end verbatim' is reached, ie:
%
%     \def\doverbatim#1@end verbatim{#1}
%
% For Texinfo it's a lot easier than for LaTeX,
% because texinfo's \verbatim doesn't stop at '\end{verbatim}':
% we need not redefine '\', '{' and '}'.
%
% Inspired by LaTeX's verbatim command set [latex.ltx]
%
\begingroup
  \catcode`\ =\active
  \obeylines %
  % ignore everything up to the first ^^M, that's the newline at the end
  % of the @verbatim input line itself.  Otherwise we get an extra blank
  % line in the output.
  \xdef\doverbatim#1^^M#2@end verbatim{%
    \starttabbox#2\egroup\noexpand\end\gobble verbatim}%
  % We really want {...\end verbatim} in the body of the macro, but
  % without the active space; thus we have to use \xdef and \gobble.
  % The \egroup ends the \verbbox started at the end of the last line in
  % the block.
\endgroup
%
\envdef\verbatim{%
  \setnormaldispenv\setupverbatim\doverbatim
}
\let\Everbatim = \afterenvbreak

% @verbatiminclude FILE - insert text of file in verbatim environment.
%
\def\verbatiminclude{\parseargusing\filenamecatcodes\doverbatiminclude}
%
\def\doverbatiminclude#1{%
  {%
    \makevalueexpandable
    \setupverbatim
    {%
      \indexnofonts       % Allow `@@' and other weird things in file names.
      \wlog{texinfo.tex: doing @verbatiminclude of #1^^J}%
      \edef\tmp{\noexpand\input #1 }
      \expandafter
    }\expandafter\starttabbox\tmp\egroup
    \afterenvbreak
  }%
}

% @copying ... @end copying.
% Save the text away for @insertcopying later.
%
% We save the uninterpreted tokens, rather than creating a box.
% Saving the text in a box would be much easier, but then all the
% typesetting commands (@smallbook, font changes, etc.) have to be done
% beforehand -- and a) we want @copying to be done first in the source
% file; b) letting users define the frontmatter in as flexible order as
% possible is desirable.
%
\def\copying{\checkenv{}\begingroup\scanargctxt\docopying}
\def\docopying#1@end copying{\endgroup\def\copyingtext{#1}}
%
\def\insertcopying{%
  \begingroup
    \parindent = 0pt  % paragraph indentation looks wrong on title page
    \scanexp\copyingtext
  \endgroup
}

\message{defuns,}
% @defun etc.

\newskip\defbodyindent \defbodyindent=.4in
\newskip\defargsindent \defargsindent=50pt
\newskip\deflastargmargin \deflastargmargin=18pt
\newcount\defunpenalty

% Start the processing of @deffn:
\def\startdefun{%
  \ifnum\lastpenalty<10000
    \medbreak
    \defunpenalty=10003 % Will keep this @deffn together with the
                        % following @def command, see below.
  \else
    % If there are two @def commands in a row, we'll have a \nobreak,
    % which is there to keep the function description together with its
    % header.  But if there's nothing but headers, we need to allow a
    % break somewhere.  Check specifically for penalty 10002, inserted
    % by \printdefunline, instead of 10000, since the sectioning
    % commands also insert a nobreak penalty, and we don't want to allow
    % a break between a section heading and a defun.
    %
    % As a further refinement, we avoid "club" headers by signalling
    % with penalty of 10003 after the very first @deffn in the
    % sequence (see above), and penalty of 10002 after any following
    % @def command.
    \ifnum\lastpenalty=10002 \penalty2000 \else \defunpenalty=10002 \fi
    %
    % Similarly, after a section heading, do not allow a break.
    % But do insert the glue.
    \medskip  % preceded by discardable penalty, so not a breakpoint
  \fi
  %
  \parindent=0in
  \advance\leftskip by \defbodyindent
  \exdentamount=\defbodyindent
}

\def\dodefunx#1{%
  % First, check whether we are in the right environment:
  \checkenv#1%
  %
  % As above, allow line break if we have multiple x headers in a row.
  % It's not a great place, though.
  \ifnum\lastpenalty=10002 \penalty3000 \else \defunpenalty=10002 \fi
  %
  % And now, it's time to reuse the body of the original defun:
  \expandafter\gobbledefun#1%
}
\def\gobbledefun#1\startdefun{}

% \printdefunline \deffnheader{text}
%
\def\printdefunline#1#2{%
  \begingroup
    % call \deffnheader:
    #1#2 \endheader
    % common ending:
    \interlinepenalty = 10000
    \advance\rightskip by 0pt plus 1fil\relax
    \endgraf
    \nobreak\vskip -\parskip
    \penalty\defunpenalty  % signal to \startdefun and \dodefunx
    % Some of the @defun-type tags do not enable magic parentheses,
    % rendering the following check redundant.  But we don't optimize.
    \checkparencounts
  \endgroup
}

\def\Edefun{\endgraf\medbreak}

% \makedefun{deffn} creates \deffn, \deffnx and \Edeffn;
% the only thing remaining is to define \deffnheader.
%
\def\makedefun#1{%
  \expandafter\let\csname E#1\endcsname = \Edefun
  \edef\temp{\noexpand\domakedefun
    \makecsname{#1}\makecsname{#1x}\makecsname{#1header}}%
  \temp
}

% \domakedefun \deffn \deffnx \deffnheader { (defn. of \deffnheader) }
%
% Define \deffn and \deffnx, without parameters.
% \deffnheader has to be defined explicitly.
%
\def\domakedefun#1#2#3{%
  \envdef#1{%
    \startdefun
    \doingtypefnfalse    % distinguish typed functions from all else
    \parseargusing\activeparens{\printdefunline#3}%
  }%
  \def#2{\dodefunx#1}%
  \def#3%
}

\newif\ifdoingtypefn       % doing typed function?
\newif\ifrettypeownline    % typeset return type on its own line?

% @deftypefnnewline on|off says whether the return type of typed functions
% are printed on their own line.  This affects @deftypefn, @deftypefun,
% @deftypeop, and @deftypemethod.
%
\parseargdef\deftypefnnewline{%
  \def\temp{#1}%
  \ifx\temp\onword
    \expandafter\let\csname SETtxideftypefnnl\endcsname
      = \empty
  \else\ifx\temp\offword
    \expandafter\let\csname SETtxideftypefnnl\endcsname
      = \relax
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @txideftypefnnl value `\temp',
                must be on|off}%
  \fi\fi
}

% \dosubind {index}{topic}{subtopic}
%
% If SUBTOPIC is present, precede it with a space, and call \doind.
% (At some time during the 20th century, this made a two-level entry in an
% index such as the operation index.  Nobody seemed to notice the change in
% behaviour though.)
\def\dosubind#1#2#3{%
  \def\thirdarg{#3}%
  \ifx\thirdarg\empty
    \doind{#1}{#2}%
  \else
    \doind{#1}{#2\space#3}%
  \fi
}

% Untyped functions:

% @deffn category name args
\makedefun{deffn}{\deffngeneral{}}

% @defop category class name args
\makedefun{defop}#1 {\defopon{#1\ \putwordon}}

% \defopon {category on}class name args
\def\defopon#1#2 {\deffngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} }

% \deffngeneral {subind}category name args
%
\def\deffngeneral#1#2 #3 #4\endheader{%
  \dosubind{fn}{\code{#3}}{#1}%
  \defname{#2}{}{#3}\magicamp\defunargs{#4\unskip}%
}

% Typed functions:

% @deftypefn category type name args
\makedefun{deftypefn}{\deftypefngeneral{}}

% @deftypeop category class type name args
\makedefun{deftypeop}#1 {\deftypeopon{#1\ \putwordon}}

% \deftypeopon {category on}class type name args
\def\deftypeopon#1#2 {\deftypefngeneral{\putwordon\ \code{#2}}{#1\ \code{#2}} }

% \deftypefngeneral {subind}category type name args
%
\def\deftypefngeneral#1#2 #3 #4 #5\endheader{%
  \dosubind{fn}{\code{#4}}{#1}%
  \doingtypefntrue
  \defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}

% Typed variables:

% @deftypevr category type var args
\makedefun{deftypevr}{\deftypecvgeneral{}}

% @deftypecv category class type var args
\makedefun{deftypecv}#1 {\deftypecvof{#1\ \putwordof}}

% \deftypecvof {category of}class type var args
\def\deftypecvof#1#2 {\deftypecvgeneral{\putwordof\ \code{#2}}{#1\ \code{#2}} }

% \deftypecvgeneral {subind}category type var args
%
\def\deftypecvgeneral#1#2 #3 #4 #5\endheader{%
  \dosubind{vr}{\code{#4}}{#1}%
  \defname{#2}{#3}{#4}\defunargs{#5\unskip}%
}

% Untyped variables:

% @defvr category var args
\makedefun{defvr}#1 {\deftypevrheader{#1} {} }

% @defcv category class var args
\makedefun{defcv}#1 {\defcvof{#1\ \putwordof}}

% \defcvof {category of}class var args
\def\defcvof#1#2 {\deftypecvof{#1}#2 {} }

% Types:

% @deftp category name args
\makedefun{deftp}#1 #2 #3\endheader{%
  \doind{tp}{\code{#2}}%
  \defname{#1}{}{#2}\defunargs{#3\unskip}%
}

% Remaining @defun-like
% shortcuts:
\makedefun{defun}{\deffnheader{\putwordDeffunc} }
\makedefun{defmac}{\deffnheader{\putwordDefmac} }
\makedefun{defspec}{\deffnheader{\putwordDefspec} }
\makedefun{deftypefun}{\deftypefnheader{\putwordDeffunc} }
\makedefun{defvar}{\defvrheader{\putwordDefvar} }
\makedefun{defopt}{\defvrheader{\putwordDefopt} }
\makedefun{deftypevar}{\deftypevrheader{\putwordDefvar} }
\makedefun{defmethod}{\defopon\putwordMethodon}
\makedefun{deftypemethod}{\deftypeopon\putwordMethodon}
\makedefun{defivar}{\defcvof\putwordInstanceVariableof}
\makedefun{deftypeivar}{\deftypecvof\putwordInstanceVariableof}

% \defname, which formats the name of the @def (not the args).
% #1 is the category, such as "Function".
% #2 is the return type, if any.
% #3 is the function name.
%
% We are followed by (but not passed) the arguments, if any.
%
\def\defname#1#2#3{%
  \par
  % Get the values of \leftskip and \rightskip as they were outside the @def...
  \advance\leftskip by -\defbodyindent
  %
  % Determine if we are typesetting the return type of a typed function
  % on a line by itself.
  \rettypeownlinefalse
  \ifdoingtypefn  % doing a typed function specifically?
    % then check user option for putting return type on its own line:
    \expandafter\ifx\csname SETtxideftypefnnl\endcsname\relax \else
      \rettypeownlinetrue
    \fi
  \fi
  %
  % How we'll format the category name.  Putting it in brackets helps
  % distinguish it from the body text that may end up on the next line
  % just below it.
  \def\temp{#1}%
  \setbox0=\hbox{\kern\deflastargmargin \ifx\temp\empty\else [\rm\temp]\fi}
  %
  % Figure out line sizes for the paragraph shape.  We'll always have at
  % least two.
  \tempnum = 2
  %
  % The first line needs space for \box0; but if \rightskip is nonzero,
  % we need only space for the part of \box0 which exceeds it:
  \dimen0=\hsize  \advance\dimen0 by -\wd0  \advance\dimen0 by \rightskip
  %
  % If doing a return type on its own line, we'll have another line.
  \ifrettypeownline
    \advance\tempnum by 1
    \def\maybeshapeline{0in \hsize}%
  \else
    \def\maybeshapeline{}%
  \fi
  %
  % The continuations:
  \dimen2=\hsize  \advance\dimen2 by -\defargsindent
  %
  % The final paragraph shape:
  \parshape \tempnum  0in \dimen0  \maybeshapeline  \defargsindent \dimen2
  %
  % Put the category name at the right margin.
  \noindent
  \hbox to 0pt{%
    \hfil\box0 \kern-\hsize
    % \hsize has to be shortened this way:
    \kern\leftskip
    % Intentionally do not respect \rightskip, since we need the space.
  }%
  %
  % Allow all lines to be underfull without complaint:
  \tolerance=10000 \hbadness=10000
  \exdentamount=\defbodyindent
  {%
    % defun fonts. We use typewriter by default (used to be bold) because:
    % . we're printing identifiers, they should be in tt in principle.
    % . in languages with many accents, such as Czech or French, it's
    %   common to leave accents off identifiers.  The result looks ok in
    %   tt, but exceedingly strange in rm.
    % . we don't want -- and --- to be treated as ligatures.
    % . this still does not fix the ?` and !` ligatures, but so far no
    %   one has made identifiers using them :).
    \df \tt
    \def\temp{#2}% text of the return type
    \ifx\temp\empty\else
      \tclose{\temp}% typeset the return type
      \ifrettypeownline
        % put return type on its own line; prohibit line break following:
        \hfil\vadjust{\nobreak}\break
      \else
        \space  % type on same line, so just followed by a space
      \fi
    \fi           % no return type
    #3% output function name
  }%
  {\rm\enskip}% hskip 0.5 em of \rmfont
  %
  \boldbrax
  % arguments will be output next, if any.
}

% Print arguments in slanted roman (not ttsl), inconsistently with using
% tt for the name.  This is because literal text is sometimes needed in
% the argument list (groff manual), and ttsl and tt are not very
% distinguishable.  Prevent hyphenation at `-' chars.
%
\def\defunargs#1{%
  % use sl by default (not ttsl),
  % tt for the names.
  \df \sl \hyphenchar\font=0
  %
  % On the other hand, if an argument has two dashes (for instance), we
  % want a way to get ttsl.  We used to recommend @var for that, so
  % leave the code in, but it's strange for @var to lead to typewriter.
  % Nowadays we recommend @code, since the difference between a ttsl hyphen
  % and a tt hyphen is pretty tiny.  @code also disables ?` !`.
  \def\var##1{{\setregularquotes\ttslanted{##1}}}%
  #1%
  \sl\hyphenchar\font=45
}

% We want ()&[] to print specially on the defun line.
%
\def\activeparens{%
  \catcode`\(=\active \catcode`\)=\active
  \catcode`\[=\active \catcode`\]=\active
  \catcode`\&=\active
}

% Make control sequences which act like normal parenthesis chars.
\let\lparen = ( \let\rparen = )

% Be sure that we always have a definition for `(', etc.  For example,
% if the fn name has parens in it, \boldbrax will not be in effect yet,
% so TeX would otherwise complain about undefined control sequence.
{
  \activeparens
  \global\let(=\lparen \global\let)=\rparen
  \global\let[=\lbrack \global\let]=\rbrack
  \global\let& = \&

  \gdef\boldbrax{\let(=\opnr\let)=\clnr\let[=\lbrb\let]=\rbrb}
  \gdef\magicamp{\let&=\amprm}
}

\let\ampchar\&

\newcount\parencount

% If we encounter &foo, then turn on ()-hacking afterwards
\newif\ifampseen
% NOTE(review): upstream texinfo.tex defines this as
%   \def\amprm#1 {\ampseentrue{\bf\&#1 }}
% but this copy reads {\bf\ } -- the `\&#1' appears to have been lost
% (possibly by an extraction/entity-stripping step).  Verify against the
% pristine texinfo.tex before relying on this line.
\def\amprm#1 {\ampseentrue{\bf\ }}

\def\parenfont{%
  \ifampseen
    % At the first level, print parens in roman,
    % otherwise use the default font.
    \ifnum \parencount=1 \rm \fi
  \else
    % The \sf parens (in \boldbrax) actually are a little bolder than
    % the contained text.  This is especially needed for [ and ] .
    \sf
  \fi
}
\def\infirstlevel#1{%
  \ifampseen
    \ifnum\parencount=1
      #1%
    \fi
  \fi
}
\def\bfafterword#1 {#1 \bf}

\def\opnr{%
  \global\advance\parencount by 1
  {\parenfont(}%
  \infirstlevel \bfafterword
}
\def\clnr{%
  {\parenfont)}%
  \infirstlevel \sl
  \global\advance\parencount by -1
}

\newcount\brackcount
\def\lbrb{%
  \global\advance\brackcount by 1
  {\bf[}%
}
\def\rbrb{%
  {\bf]}%
  \global\advance\brackcount by -1
}

\def\checkparencounts{%
  \ifnum\parencount=0 \else \badparencount \fi
  \ifnum\brackcount=0 \else \badbrackcount \fi
}
% these should not use \errmessage; the glibc manual, at least, actually
% has such constructs (when documenting function pointers).
\def\badparencount{%
  \message{Warning: unbalanced parentheses in @def...}%
  \global\parencount=0
}
\def\badbrackcount{%
  \message{Warning: unbalanced square brackets in @def...}%
  \global\brackcount=0
}

\message{macros,}
% @macro.

% To do this right we need a feature of e-TeX, \scantokens,
% which we arrange to emulate with a temporary file in ordinary TeX.
\ifx\eTeXversion\thisisundefined
  \newwrite\macscribble
  \def\scantokens#1{%
    \toks0={#1}%
    \immediate\openout\macscribble=\jobname.tmp
    \immediate\write\macscribble{\the\toks0}%
    \immediate\closeout\macscribble
    \input \jobname.tmp
  }
\fi

% Short alias for \expandafter, used in the expansion chains below.
\let\E=\expandafter

% Used at the time of macro expansion.
% Argument is macro body with arguments substituted
\def\scanmacro#1{%
  \newlinechar`\^^M
  % expand the expansion of \eatleadingcr twice to maybe remove a leading
  % newline (and \else and \fi tokens), then call \eatspaces on the result.
  \def\xeatspaces##1{%
    \E\E\E\E\E\E\E\eatspaces\E\E\E\E\E\E\E{\eatleadingcr##1%
  }}%
  \def\xempty##1{}%
  %
  % Process the macro body under the current catcode regime.
  \scantokens{#1@comment}%
  %
  % The \comment is to remove the \newlinechar added by \scantokens, and
  % can be noticed by \parsearg.  Note \c isn't used because this means cedilla
  % in math mode.
}

% Used for copying and captions
\def\scanexp#1{%
  \expandafter\scanmacro\expandafter{#1}%
}

\newcount\paramno   % Count of parameters
\newtoks\macname    % Macro name
\newif\ifrecursive  % Is it recursive?

% List of all defined macros in the form
%    \commondummyword\macro1\commondummyword\macro2...
% Currently is also contains all @aliases; the list can be split
% if there is a need.
\def\macrolist{}

% Add the macro to \macrolist
\def\addtomacrolist#1{\expandafter \addtomacrolistxxx \csname#1\endcsname}
\def\addtomacrolistxxx#1{%
  \toks0 = \expandafter{\macrolist\commondummyword#1}%
  \xdef\macrolist{\the\toks0}%
}

% Utility routines.
% This does \let #1 = #2, with \csnames; that is,
%   \let \csname#1\endcsname = \csname#2\endcsname
% (except of course we have to play expansion games).
%
\def\cslet#1#2{%
  \expandafter\let
  \csname#1\expandafter\endcsname
  \csname#2\endcsname
}

% Trim leading and trailing spaces off a string.  Concepts from aro-bend
% problem 15 (see CTAN).
{\catcode`\@=11
  \gdef\eatspaces #1{\expandafter\trim@\expandafter{#1 }}
  \gdef\trim@ #1{\trim@@ @#1 @ #1 @ @@}
  \gdef\trim@@ #1@ #2@ #3@@{\trim@@@\empty #2 @}
  \def\unbrace#1{#1}
  \unbrace{\gdef\trim@@@ #1 } #2@{#1}
}

% \eatleadingcr: remove a leading ^^M (carriage return) from its argument.
{\catcode`\^^M=\other%
\gdef\eatleadingcr#1{\if\noexpand#1\noexpand^^M\else\E#1\fi}}%
% Warning: this won't work for a delimited argument
% or for an empty argument

% Trim a single trailing ^^M off a string.
{\catcode`\^^M=\other \catcode`\Q=3%
\gdef\eatcr #1{\eatcra #1Q^^MQ}%
\gdef\eatcra#1^^MQ{\eatcrb#1Q}%
\gdef\eatcrb#1Q#2Q{#1}%
}

% Macro bodies are absorbed as an argument in a context where
% all characters are catcode 10, 11 or 12, except \ which is active
% (as in normal texinfo).  It is necessary to change the definition of \
% to recognize macro arguments; this is the job of \mbodybackslash.
%
% Non-ASCII encodings make 8-bit characters active, so un-activate
% them to avoid their expansion.  Must do this non-globally, to
% confine the change to the current group.
%
% It's necessary to have hard CRs when the macro is executed. This is
% done by making ^^M (\endlinechar) catcode 12 when reading the macro
% body, and then making it the \newlinechar in \scanmacro.
%
\def\scanctxt{% used as subroutine
  \catcode`\"=\other
  \catcode`\+=\other
  \catcode`\<=\other
  \catcode`\>=\other
  \catcode`\^=\other
  \catcode`\_=\other
  \catcode`\|=\other
  \catcode`\~=\other
  \passthroughcharstrue
}

\def\scanargctxt{% used for copying and captions, not macros.
  \scanctxt
  \catcode`\@=\other
  \catcode`\\=\other
  \catcode`\^^M=\other
}

\def\macrobodyctxt{% used for @macro definitions
  \scanctxt
  \catcode`\ =\other
  \catcode`\@=\other
  \catcode`\{=\other
  \catcode`\}=\other
  \catcode`\^^M=\other
  \usembodybackslash
}

% Used when scanning braced macro arguments.  Note, however, that catcode
% changes here are ineffectual if the macro invocation was nested inside
% an argument to another Texinfo command.
\def\macroargctxt{%
  \scanctxt
  \catcode`\ =\active
  \catcode`\@=\other
  \catcode`\^^M=\other
  \catcode`\\=\active
}

\def\macrolineargctxt{% used for whole-line arguments without braces
  \scanctxt
  \catcode`\@=\other
  \catcode`\{=\other
  \catcode`\}=\other
}

% \mbodybackslash is the definition of \ in @macro bodies.
% It maps \foo\ => \csname macarg.foo\endcsname => #N
% where N is the macro parameter number.
% We define \csname macarg.\endcsname to be \realbackslash, so
% \\ in macro replacement text gets you a backslash.
%
{\catcode`@=0 @catcode`@\=@active
 @gdef@usembodybackslash{@let\=@mbodybackslash}
 @gdef@mbodybackslash#1\{@csname macarg.#1@endcsname}
}
\expandafter\def\csname macarg.\endcsname{\realbackslash}

\def\margbackslash#1{\char`\#1 }

\def\macro{\recursivefalse\parsearg\macroxxx}
\def\rmacro{\recursivetrue\parsearg\macroxxx}

\def\macroxxx#1{%
  \getargs{#1}% now \macname is the macname and \argl the arglist
  \ifx\argl\empty       % no arguments
    \paramno=0\relax
  \else
    \expandafter\parsemargdef \argl;%
    % NOTE(review): \if does not compare a count register numerically --
    % this test looks like it was meant to be \ifnum\paramno>256\relax,
    % in which case the eTeX check below may never trigger.  Verify
    % against upstream texinfo.tex before changing.
    \if\paramno>256\relax
      \ifx\eTeXversion\thisisundefined
        \errhelp = \EMsimple
        \errmessage{You need eTeX to compile a file with macros with more than
          256 arguments}
      \fi
    \fi
  \fi
  \if1\csname ismacro.\the\macname\endcsname
     \message{Warning: redefining \the\macname}%
  \else
     \expandafter\ifx\csname \the\macname\endcsname \relax
     \else \errmessage{Macro name \the\macname\space already defined}\fi
     \global\cslet{macsave.\the\macname}{\the\macname}%
     \global\expandafter\let\csname ismacro.\the\macname\endcsname=1%
     \addtomacrolist{\the\macname}%
  \fi
  \begingroup \macrobodyctxt
  \ifrecursive \expandafter\parsermacbody
  \else \expandafter\parsemacbody
  \fi}

\parseargdef\unmacro{%
  \if1\csname ismacro.#1\endcsname
    \global\cslet{#1}{macsave.#1}%
    \global\expandafter\let \csname ismacro.#1\endcsname=0%
    % Remove the macro name from \macrolist:
    \begingroup
      \expandafter\let\csname#1\endcsname \relax
      \let\commondummyword\unmacrodo
      \xdef\macrolist{\macrolist}%
    \endgroup
  \else
    \errmessage{Macro #1 not defined}%
  \fi
}

% Called by \do from \dounmacro on each macro.  The idea is to omit any
% macro definitions that have been changed to \relax.
%
\def\unmacrodo#1{%
  \ifx #1\relax
    % remove this
  \else
    \noexpand\commondummyword \noexpand#1%
  \fi
}

% \getargs -- Parse the arguments to a @macro line.  Set \macname to
% the name of the macro, and \argl to the braced argument list.
\def\getargs#1{\getargsxxx#1{}}
\def\getargsxxx#1#{\getmacname #1 \relax\getmacargs}
\def\getmacname#1 #2\relax{\macname={#1}}
\def\getmacargs#1{\def\argl{#1}}
% This made use of the feature that if the last token of a
% <parameter list> is #, then the preceding argument is delimited by
% an opening brace, and that opening brace is not consumed.

% Parse the optional {params} list to @macro or @rmacro.
% Set \paramno to the number of arguments,
% and \paramlist to a parameter text for the macro (e.g. #1,#2,#3 for a
% three-param macro.)  Define \macarg.BLAH for each BLAH in the params
% list to some hook where the argument is to be expanded.  If there are
% less than 10 arguments that hook is to be replaced by ##N where N
% is the position in that list, that is to say the macro arguments are to be
% defined `a la TeX in the macro body.
%
% That gets used by \mbodybackslash (above).
%
% If there are 10 or more arguments, a different technique is used: see
% \parsemmanyargdef.
%
\def\parsemargdef#1;{%
  \paramno=0\def\paramlist{}%
  \let\hash\relax
  % \hash is redefined to `#' later to get it into definitions
  \let\xeatspaces\relax
  \let\xempty\relax
  \parsemargdefxxx#1,;,%
  \ifnum\paramno<10\relax\else
    \paramno0\relax
    \parsemmanyargdef@@#1,;,% 10 or more arguments
  \fi
}
\def\parsemargdefxxx#1,{%
  \if#1;\let\next=\relax
  \else \let\next=\parsemargdefxxx
    \advance\paramno by 1
    \expandafter\edef\csname macarg.\eatspaces{#1}\endcsname
        {\xeatspaces{\hash\the\paramno\noexpand\xempty{}}}%
    \edef\paramlist{\paramlist\hash\the\paramno,}%
  \fi\next}

% the \xempty{} is to give \eatleadingcr an argument in the case of an
% empty macro argument.

% \parsemacbody, \parsermacbody
%
% Read recursive and nonrecursive macro bodies. (They're different since
% rec and nonrec macros end differently.)
%
% We are in \macrobodyctxt, and the \xdef causes backslashes in the macro
% body to be transformed.
% Set \macrobody to the body of the macro, and call \defmacro.
%
{\catcode`\ =\other\long\gdef\parsemacbody#1@end macro{%
\xdef\macrobody{\eatcr{#1}}\endgroup\defmacro}}%
{\catcode`\ =\other\long\gdef\parsermacbody#1@end rmacro{%
\xdef\macrobody{\eatcr{#1}}\endgroup\defmacro}}%

% Make @ a letter, so that we can make private-to-Texinfo macro names.
\edef\texiatcatcode{\the\catcode`\@}
\catcode `@=11\relax

%%%%%%%%%%%%%% Code for > 10 arguments only %%%%%%%%%%%%%%%%%%

% If there are 10 or more arguments, a different technique is used, where the
% hook remains in the body, and when macro is to be expanded the body is
% processed again to replace the arguments.
%
% In that case, the hook is \the\toks N-1, and we simply set \toks N-1 to the
% argument N value and then \edef the body (nothing else will expand because of
% the catcode regime under which the body was input).
%
% If you compile with TeX (not eTeX), and you have macros with 10 or more
% arguments, no macro can have more than 256 arguments (else error).
%
% In case that there are 10 or more arguments we parse again the arguments
% list to set new definitions for the \macarg.BLAH macros corresponding to
% each BLAH argument. It was anyhow needed to parse already once this list
% in order to count the arguments, and as macros with at most 9 arguments
% are by far more frequent than macro with 10 or more arguments, defining
% twice the \macarg.BLAH macros does not cost too much processing power.
%
\def\parsemmanyargdef@@#1,{%
  \if#1;\let\next=\relax
  \else
    \let\next=\parsemmanyargdef@@
    \edef\tempb{\eatspaces{#1}}%
    \expandafter\def\expandafter\tempa
      \expandafter{\csname macarg.\tempb\endcsname}%
    % Note that we need some extra \noexpand\noexpand, this is because we
    % don't want \the to be expanded in the \parsermacbody  as it uses an
    % \xdef .
    \expandafter\edef\tempa
      {\noexpand\noexpand\noexpand\the\toks\the\paramno}%
    \advance\paramno by 1\relax
  \fi\next}

% Sentinel tokens used to mark the end of the name/value lists below.
\let\endargs@\relax
\let\nil@\relax
\def\nilm@{\nil@}%
\long\def\nillm@{\nil@}%

% This macro is expanded during the Texinfo macro expansion, not during its
% definition.  It gets all the arguments' values and assigns them to macros
% macarg.ARGNAME
%
% #1 is the macro name
% #2 is the list of argument names
% #3 is the list of argument values
\def\getargvals@#1#2#3{%
  \def\macargdeflist@{}%
  \def\saveparamlist@{#2}% Need to keep a copy for parameter expansion.
  \def\paramlist{#2,\nil@}%
  \def\macroname{#1}%
  \begingroup
  \macroargctxt
  \def\argvaluelist{#3,\nil@}%
  \def\@tempa{#3}%
  \ifx\@tempa\empty
    \setemptyargvalues@
  \else
    \getargvals@@
  \fi
}
\def\getargvals@@{%
  \ifx\paramlist\nilm@
      % Some sanity check needed here that \argvaluelist is also empty.
      \ifx\argvaluelist\nillm@
      \else
        \errhelp = \EMsimple
        \errmessage{Too many arguments in macro `\macroname'!}%
      \fi
      \let\next\macargexpandinbody@
  \else
    \ifx\argvaluelist\nillm@
       % No more arguments values passed to macro.  Set remaining named-arg
       % macros to empty.
       \let\next\setemptyargvalues@
    \else
      % pop current arg name into \@tempb
      \def\@tempa##1{\pop@{\@tempb}{\paramlist}##1\endargs@}%
      \expandafter\@tempa\expandafter{\paramlist}%
      % pop current argument value into \@tempc
      \def\@tempa##1{\longpop@{\@tempc}{\argvaluelist}##1\endargs@}%
      \expandafter\@tempa\expandafter{\argvaluelist}%
      % Here \@tempb is the current arg name and \@tempc is the current arg value.
      % First place the new argument macro definition into \@tempd
      \expandafter\macname\expandafter{\@tempc}%
      \expandafter\let\csname macarg.\@tempb\endcsname\relax
      \expandafter\def\expandafter\@tempe\expandafter{%
        \csname macarg.\@tempb\endcsname}%
      \edef\@tempd{\long\def\@tempe{\the\macname}}%
      \push@\@tempd\macargdeflist@
      \let\next\getargvals@@
    \fi
  \fi
  \next
}

% \push@#1#2: redefine the list macro #2 so that token #1 comes first,
% followed by the previous contents of #2.
\def\push@#1#2{%
  \expandafter\expandafter\expandafter\def
  \expandafter\expandafter\expandafter#2%
  \expandafter\expandafter\expandafter{%
  \expandafter#1#2}%
}

% Replace arguments by their values in the macro body, and place the result
% in macro \@tempa.
%
\def\macvalstoargs@{%
  %  To do this we use the property that token registers that are \the'ed
  % within an \edef  expand only once. So we are going to place all argument
  % values into respective token registers.
  %
  % First we save the token context, and initialize argument numbering.
  \begingroup
    \paramno0\relax
    % Then, for each argument number #N, we place the corresponding argument
    % value into a new token list register \toks#N
    \expandafter\putargsintokens@\saveparamlist@,;,%
    % Then, we expand the body so that argument are replaced by their
    % values. The trick for values not to be expanded themselves is that they
    % are within tokens and that tokens expand only once in an \edef .
    \edef\@tempc{\csname mac.\macroname .body\endcsname}%
    % Now we restore the token stack pointer to free the token list registers
    % which we have used, but we make sure that expanded body is saved after
    % group.
    \expandafter
  \endgroup
  \expandafter\def\expandafter\@tempa\expandafter{\@tempc}%
}

% Define the named-macro outside of this group and then close this group.
%
\def\macargexpandinbody@{%
  \expandafter
  \endgroup
  \macargdeflist@
  % First the replace in body the macro arguments by their values, the result
  % is in \@tempa .
  \macvalstoargs@
  % Then we point at the \norecurse or \gobble (for recursive) macro value
  % with \@tempb .
  \expandafter\let\expandafter\@tempb\csname mac.\macroname .recurse\endcsname
  % Depending on whether it is recursive or not, we need some tailing
  % \egroup .
  \ifx\@tempb\gobble
     \let\@tempc\relax
  \else
     \let\@tempc\egroup
  \fi
  % And now we do the real job:
  \edef\@tempd{\noexpand\@tempb{\macroname}\noexpand\scanmacro{\@tempa}\@tempc}%
  \@tempd
}

\def\putargsintokens@#1,{%
  \if#1;\let\next\relax
  \else
    \let\next\putargsintokens@
    % First we allocate the new token list register, and give it a temporary
    % alias \@tempb .
    \toksdef\@tempb\the\paramno
    % Then we place the argument value into that token list register.
    \expandafter\let\expandafter\@tempa\csname macarg.#1\endcsname
    \expandafter\@tempb\expandafter{\@tempa}%
    \advance\paramno by 1\relax
  \fi
  \next
}

% Trailing missing arguments are set to empty.
%
\def\setemptyargvalues@{%
  \ifx\paramlist\nilm@
    \let\next\macargexpandinbody@
  \else
    \expandafter\setemptyargvaluesparser@\paramlist\endargs@
    \let\next\setemptyargvalues@
  \fi
  \next
}

\def\setemptyargvaluesparser@#1,#2\endargs@{%
  \expandafter\def\expandafter\@tempa\expandafter{%
    \expandafter\def\csname macarg.#1\endcsname{}}%
  \push@\@tempa\macargdeflist@
  \def\paramlist{#2}%
}

% #1 is the element target macro
% #2 is the list macro
% #3,#4\endargs@ is the list value
\def\pop@#1#2#3,#4\endargs@{%
   \def#1{#3}%
   \def#2{#4}%
}
\long\def\longpop@#1#2#3,#4\endargs@{%
   \long\def#1{#3}%
   \long\def#2{#4}%
}

%%%%%%%%%%%%%% End of code for > 10 arguments %%%%%%%%%%%%%%%%%%

% This defines a Texinfo @macro or @rmacro, called by \parsemacbody.
% \macrobody has the body of the macro in it, with placeholders for
% its parameters, looking like "\xeatspaces{\hash 1}".
% \paramno is the number of parameters
% \paramlist is a TeX parameter text, e.g. "#1,#2,#3,"
% There are four cases: macros of zero, one, up to nine, and many arguments.
% \xdef is used so that macro definitions will survive the file
% they're defined in: @include reads the file inside a group.
%
\def\defmacro{%
  \let\hash=##% convert placeholders to macro parameter chars
  \ifnum\paramno=1
    \def\xeatspaces##1{##1}%
    % This removes the pair of braces around the argument.  We don't
    % use \eatspaces, because this can cause ends of lines to be lost
    % when the argument to \eatspaces is read, leading to line-based
    % commands like "@itemize" not being read correctly.
  \else
    \let\xeatspaces\relax % suppress expansion
  \fi
  \ifcase\paramno
  % 0
    \expandafter\xdef\csname\the\macname\endcsname{%
      \bgroup
      \noexpand\spaceisspace
      \noexpand\endlineisspace
      \noexpand\expandafter % skip any whitespace after the macro name.
      \expandafter\noexpand\csname\the\macname @@@\endcsname}%
    \expandafter\xdef\csname\the\macname @@@\endcsname{%
      \egroup
      \noexpand\scanmacro{\macrobody}}%
  \or % 1
    \expandafter\xdef\csname\the\macname\endcsname{%
      \bgroup
      \noexpand\braceorline
      \expandafter\noexpand\csname\the\macname @@@\endcsname}%
    \expandafter\xdef\csname\the\macname @@@\endcsname##1{%
      \egroup
      \noexpand\scanmacro{\macrobody}%
      }%
  \else % at most 9
    \ifnum\paramno<10\relax
      % @MACNAME sets the context for reading the macro argument
      % @MACNAME@@ gets the argument, processes backslashes and appends a
      % comma.
      % @MACNAME@@@ removes braces surrounding the argument list.
      % @MACNAME@@@@ scans the macro body with arguments substituted.
      \expandafter\xdef\csname\the\macname\endcsname{%
        \bgroup
        \noexpand\expandafter  % This \expandafter skips any spaces after the
        \noexpand\macroargctxt % macro before we change the catcode of space.
        \noexpand\expandafter
        \expandafter\noexpand\csname\the\macname @@\endcsname}%
      \expandafter\xdef\csname\the\macname @@\endcsname##1{%
          \noexpand\passargtomacro
          \expandafter\noexpand\csname\the\macname @@@\endcsname{##1,}}%
      \expandafter\xdef\csname\the\macname @@@\endcsname##1{%
          \expandafter\noexpand\csname\the\macname @@@@\endcsname ##1}%
      \expandafter\expandafter
      \expandafter\xdef
      \expandafter\expandafter
        \csname\the\macname @@@@\endcsname\paramlist{%
          \egroup\noexpand\scanmacro{\macrobody}}%
    \else % 10 or more:
      \expandafter\xdef\csname\the\macname\endcsname{%
        \noexpand\getargvals@{\the\macname}{\argl}%
      }%
      \global\expandafter\let\csname mac.\the\macname .body\endcsname\macrobody
      \global\expandafter\let\csname mac.\the\macname .recurse\endcsname\gobble
    \fi
  \fi}

\catcode `\@\texiatcatcode\relax % end private-to-Texinfo catcodes

% \norecurse{NAME} - open a group and rebind \NAME to the saved
% definition \macsave.NAME (the matching \egroup is supplied elsewhere).
\def\norecurse#1{\bgroup\cslet{#1}{macsave.#1}}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

{\catcode`\@=0 \catcode`\\=13  % We need to manipulate \ so use @ as escape
@catcode`@_=11  % private names
@catcode`@!=11  % used as argument separator

% \passargtomacro#1#2 -
% Call #1 with a list of tokens #2, with any doubled backslashes in #2
% compressed to one.
%
% This implementation works by expansion, and not execution (so we cannot use
% \def or similar).  This reduces the risk of this failing in contexts where
% complete expansion is done with no execution (for example, in writing out to
% an auxiliary file for an index entry).
%
% State is kept in the input stream: the argument passed to
% @look_ahead, @gobble_and_check_finish and @add_segment is
%
%   THE_MACRO ARG_RESULT ! {PENDING_BS} NEXT_TOKEN (... rest of input)
%
% where:
% THE_MACRO - name of the macro we want to call
% ARG_RESULT - argument list we build to pass to that macro
% PENDING_BS - either a backslash or nothing
% NEXT_TOKEN - used to look ahead in the input stream to see what's coming next

@gdef@passargtomacro#1#2{%
  @add_segment #1!{}@relax#2\@_finish\%
}
@gdef@_finish{@_finishx} @global@let@_finishx@relax

% #1 - THE_MACRO ARG_RESULT
% #2 - PENDING_BS
% #3 - NEXT_TOKEN
% #4 used to look ahead
%
% If the next token is not a backslash, process the rest of the argument;
% otherwise, remove the next token.
@gdef@look_ahead#1!#2#3#4{%
  @ifx#4\%
    @expandafter@gobble_and_check_finish
  @else
    @expandafter@add_segment
  @fi#1!{#2}#4#4%
}

% #1 - THE_MACRO ARG_RESULT
% #2 - PENDING_BS
% #3 - NEXT_TOKEN
% #4 should be a backslash, which is gobbled.
% #5 looks ahead
%
% Double backslash found.  Add a single backslash, and look ahead.
@gdef@gobble_and_check_finish#1!#2#3#4#5{%
  @add_segment#1\!{}#5#5%
}

@gdef@is_fi{@fi}

% #1 - THE_MACRO ARG_RESULT
% #2 - PENDING_BS
% #3 - NEXT_TOKEN
% #4 is input stream until next backslash
%
% Input stream is either at the start of the argument, or just after a
% backslash sequence, either a lone backslash, or a doubled backslash.
% NEXT_TOKEN contains the first token in the input stream: if it is \finish,
% finish; otherwise, append to ARG_RESULT the segment of the argument up until
% the next backslash.  PENDING_BACKSLASH contains a backslash to represent
% a backslash just before the start of the input stream that has not been
% added to ARG_RESULT.
@gdef@add_segment#1!#2#3#4\{%
@ifx#3@_finish
  @call_the_macro#1!%
@else
  % append the pending backslash to the result, followed by the next segment
  @expandafter@is_fi@look_ahead#1#2#4!{\}@fi
  % this @fi is discarded by @look_ahead.
  % we can't get rid of it with \expandafter because we don't know how
  % long #4 is.
}

% #1 - THE_MACRO
% #2 - ARG_RESULT
% #3 discards the rest of the conditional in @add_segment, and @is_fi ends the
% conditional.
@gdef@call_the_macro#1#2!#3@fi{@is_fi #1{#2}}

}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

% \braceorline MAC is used for a one-argument macro MAC.  It checks
% whether the next non-whitespace character is a {.  It sets the context
% for reading the argument (slightly different in the two cases).  Then,
% to read the argument, in the whole-line case, it then calls the regular
% \parsearg MAC; in the lbrace case, it calls \passargtomacro MAC.
%
\def\braceorline#1{\let\macnamexxx=#1\futurelet\nchar\braceorlinexxx}
\def\braceorlinexxx{%
  \ifx\nchar\bgroup
    \macroargctxt
    \expandafter\passargtomacro
  \else
    \macrolineargctxt\expandafter\parsearg
  \fi \macnamexxx}

% @alias.
% We need some trickery to remove the optional spaces around the equal
% sign.  Make them active and then expand them all to nothing.
%
\def\alias{\parseargusing\obeyspaces\aliasxxx}
\def\aliasxxx #1{\aliasyyy#1\relax}
\def\aliasyyy #1=#2\relax{%
  {%
    \expandafter\let\obeyedspace=\empty
    \addtomacrolist{#1}%
    \xdef\next{\global\let\makecsname{#1}=\makecsname{#2}}%
  }%
  \next
}


\message{cross references,}

\newwrite\auxfile
\newif\ifhavexrefs    % True if xref values are known.
\newif\ifwarnedxrefs  % True if we warned once that they aren't known.

% @inforef is relatively simple.
\def\inforef #1{\inforefzzz #1,,,,**}
\def\inforefzzz #1,#2,#3,#4**{%
  \putwordSee{} \putwordInfo{} \putwordfile{} \file{\ignorespaces #3{}},
  node \samp{\ignorespaces#1{}}}

% @node's only job in TeX is to define \lastnode, which is used in
% cross-references.  The @node line might or might not have commas, and
% might or might not have spaces before the first comma, like:
% @node foo , bar , ...
% We don't want such trailing spaces in the node name.
%
\parseargdef\node{\checkenv{}\donode #1 ,\finishnodeparse}
%
% also remove a trailing comma, in case of something like this:
% @node Help-Cross,  ,  , Cross-refs
\def\donode#1 ,#2\finishnodeparse{\dodonode #1,\finishnodeparse}
\def\dodonode#1,#2\finishnodeparse{\gdef\lastnode{#1}\omittopnode}

% Used so that the @top node doesn't have to be wrapped in an @ifnottex
% conditional.
% \doignore goes to more effort to skip nested conditionals but we don't need
% that here.
\def\omittopnode{%
   \ifx\lastnode\wordTop
   \expandafter\ignorenode\fi
}
\def\wordTop{Top}

% Until the next @node or @bye command, divert output to a box that is not
% output.
\def\ignorenode{\setbox\dummybox\vbox\bgroup\def\node{\egroup\node}%
\ignorenodebye
}

{\let\bye\relax
\gdef\ignorenodebye{\let\bye\ignorenodebyedef}
\gdef\ignorenodebyedef{\egroup(`Top' node ignored)\bye}}
% The redefinition of \bye here is because it is declared \outer

\let\lastnode=\empty

% Write a cross-reference definition for the current node.  #1 is the
% type (Ynumbered, Yappendix, Ynothing).
%
\def\donoderef#1{%
  \ifx\lastnode\empty\else
    \setref{\lastnode}{#1}%
    \global\let\lastnode=\empty
  \fi
}

% @anchor{NAME} -- define xref target at arbitrary point.
%
\newcount\savesfregister
%
% Save/restore the current \spacefactor (meaningful in horizontal mode
% only), so that \setref's whatsits do not disturb inter-word spacing.
\def\savesf{\relax \ifhmode \savesfregister=\spacefactor \fi}
\def\restoresf{\relax \ifhmode \spacefactor=\savesfregister \fi}
\def\anchor#1{\savesf \setref{#1}{Ynothing}\restoresf \ignorespaces}

% \setref{NAME}{SNT} defines a cross-reference point NAME (a node or an
% anchor), which consists of three parts:
% 1) NAME-title - the current sectioning name taken from \currentsection,
%    or the anchor name.
% 2) NAME-snt   - section number and type, passed as the SNT arg, or
%    empty for anchors.
% 3) NAME-pg    - the page number.
%
% This is called from \donoderef, \anchor, and \dofloat.  In the case of
% floats, there is an additional part, which is not written here:
% 4) NAME-lof  - the text as it should appear in a @listoffloats.
%
\def\setref#1#2{%
  \pdfmkdest{#1}%
  \iflinks
    {%
      \requireauxfile
      \atdummies  % preserve commands, but don't expand them
      % match definition in \xrdef, \refx, \xrefX.
      \def\value##1{##1}%
      \edef\writexrdef##1##2{%
        \write\auxfile{@xrdef{#1-% #1 of \setref, expanded by the \edef
          ##1}{##2}}% these are parameters of \writexrdef
      }%
      \toks0 = \expandafter{\currentsection}%
      \immediate \writexrdef{title}{\the\toks0 }%
      \immediate \writexrdef{snt}{\csname #2\endcsname}% \Ynumbered etc.
      \safewhatsit{\writexrdef{pg}{\folio}}% will be written later, at \shipout
    }%
  \fi
}

% @xrefautosectiontitle on|off says whether @section(ing) names are used
% automatically in xrefs, if the third arg is not explicitly specified.
% This was provided as a "secret" @set xref-automatic-section-title
% variable, now it's official.
%
\parseargdef\xrefautomaticsectiontitle{%
  \def\temp{#1}%
  \ifx\temp\onword
    \expandafter\let\csname SETxref-automatic-section-title\endcsname
      = \empty
  \else\ifx\temp\offword
    \expandafter\let\csname SETxref-automatic-section-title\endcsname
      = \relax
  \else
    \errhelp = \EMsimple
    \errmessage{Unknown @xrefautomaticsectiontitle value `\temp',
      must be on|off}%
  \fi\fi
}

%
% @xref, @pxref, and @ref generate cross-references.  For \xrefX, #1 is
% the node name, #2 the name of the Info cross-reference, #3 the printed
% node name, #4 the name of the Info file, #5 the name of the printed
% manual.  All but the node name can be omitted.
%
\def\pxref{\putwordsee{} \xrefXX}
\def\xref{\putwordSee{} \xrefXX}
\def\ref{\xrefXX}

\def\xrefXX#1{\def\xrefXXarg{#1}\futurelet\tokenafterxref\xrefXXX}
\def\xrefXXX{\expandafter\xrefX\expandafter[\xrefXXarg,,,,,,,]}
%
\newbox\toprefbox
\newbox\printedrefnamebox
\newbox\infofilenamebox
\newbox\printedmanualbox
%
\def\xrefX[#1,#2,#3,#4,#5,#6]{\begingroup
  \unsepspaces
  %
  % Get args without leading/trailing spaces.
  \def\printedrefname{\ignorespaces #3}%
  \setbox\printedrefnamebox = \hbox{\printedrefname\unskip}%
  %
  \def\infofilename{\ignorespaces #4}%
  \setbox\infofilenamebox = \hbox{\infofilename\unskip}%
  %
  \def\printedmanual{\ignorespaces #5}%
  \setbox\printedmanualbox = \hbox{\printedmanual\unskip}%
  %
  % If the printed reference name (arg #3) was not explicitly given in
  % the @xref, figure out what we want to use.
  \ifdim \wd\printedrefnamebox = 0pt
    % No printed node name was explicitly given.
    \expandafter\ifx\csname SETxref-automatic-section-title\endcsname \relax
      % Not auto section-title: use node name inside the square brackets.
      \def\printedrefname{\ignorespaces #1}%
    \else
      % Auto section-title: use chapter/section title inside
      % the square brackets if we have it.
      \ifdim \wd\printedmanualbox > 0pt
        % It is in another manual, so we don't have it; use node name.
        \def\printedrefname{\ignorespaces #1}%
      \else
        \ifhavexrefs
          % We (should) know the real title if we have the xref values.
          \def\printedrefname{\refx{#1-title}{}}%
        \else
          % Otherwise just copy the Info node name.
          \def\printedrefname{\ignorespaces #1}%
        \fi%
      \fi
    \fi
  \fi
  %
  % Make link in pdf output.
  \ifpdf % For pdfTeX and LuaTeX
    {\indexnofonts
     \makevalueexpandable
     \turnoffactive
     % This expands tokens, so do it after making catcode changes, so _
     % etc. don't get their TeX definitions.  This ignores all spaces in
     % #4, including (wrongly) those in the middle of the filename.
     \getfilename{#4}%
     %
     % This (wrongly) does not take account of leading or trailing
     % spaces in #1, which should be ignored.
     \setpdfdestname{#1}%
     %
     \ifx\pdfdestname\empty
       \def\pdfdestname{Top}% no empty targets
     \fi
     %
     \leavevmode
     \startlink attr{/Border [0 0 0]}%
     \ifnum\filenamelength>0
       goto file{\the\filename.pdf} name{\pdfdestname}%
     \else
       goto name{\pdfmkpgn{\pdfdestname}}%
     \fi
    }%
    \setcolor{\linkcolor}%
  \else
    \ifx\XeTeXrevision\thisisundefined
    \else % For XeTeX
      {\indexnofonts
       \makevalueexpandable
       \turnoffactive
       % This expands tokens, so do it after making catcode changes, so _
       % etc. don't get their TeX definitions.  This ignores all spaces in
       % #4, including (wrongly) those in the middle of the filename.
       \getfilename{#4}%
       %
       % This (wrongly) does not take account of leading or trailing
       % spaces in #1, which should be ignored.
       \setpdfdestname{#1}%
       %
       \ifx\pdfdestname\empty
         \def\pdfdestname{Top}% no empty targets
       \fi
       %
       \leavevmode
       \ifnum\filenamelength>0
         % With default settings,
         % XeTeX (xdvipdfmx) replaces link destination names with integers.
         % In this case, the replaced destination names of
         % remote PDFs are no longer known.  In order to avoid a replacement,
         % you can use xdvipdfmx's command line option `-C 0x0010'.
         % If you use XeTeX 0.99996+ (TeX Live 2016+),
         % this command line option is no longer necessary
         % because we can use the `dvipdfmx:config' special.
         \special{pdf:bann << /Border [0 0 0] /Type /Annot /Subtype /Link
           /A << /S /GoToR /F (\the\filename.pdf) /D (\pdfdestname) >> >>}%
       \else
         \special{pdf:bann << /Border [0 0 0] /Type /Annot /Subtype /Link
           /A << /S /GoTo /D (\pdfdestname) >> >>}%
       \fi
      }%
      \setcolor{\linkcolor}%
    \fi
  \fi
  {%
    % Have to otherify everything special to allow the \csname to
    % include an _ in the xref name, etc.
    \indexnofonts
    \turnoffactive
    \def\value##1{##1}%
    \expandafter\global\expandafter\let\expandafter\Xthisreftitle
      \csname XR#1-title\endcsname
  }%
  %
  % Float references are printed completely differently: "Figure 1.2"
  % instead of "[somenode], p.3".  \iffloat distinguishes them by
  % \Xthisreftitle being set to a magic string.
  \iffloat\Xthisreftitle
    % If the user specified the print name (third arg) to the ref,
    % print it instead of our usual "Figure 1.2".
    \ifdim\wd\printedrefnamebox = 0pt
      \refx{#1-snt}{}%
    \else
      \printedrefname
    \fi
    %
    % If the user also gave the printed manual name (fifth arg), append
    % "in MANUALNAME".
    \ifdim \wd\printedmanualbox > 0pt
      \space \putwordin{} \cite{\printedmanual}%
    \fi
  \else
    % node/anchor (non-float) references.
    %
    % If we use \unhbox to print the node names, TeX does not insert
    % empty discretionaries after hyphens, which means that it will not
    % find a line break at a hyphen in a node name.  Since some manuals
    % are best written with fairly long node names, containing hyphens,
    % this is a loss.  Therefore, we give the text of the node name
    % again, so it is as if TeX is seeing it for the first time.
    %
    \ifdim \wd\printedmanualbox > 0pt
      % Cross-manual reference with a printed manual name.
      %
      \crossmanualxref{\cite{\printedmanual\unskip}}%
    %
    \else\ifdim \wd\infofilenamebox > 0pt
      % Cross-manual reference with only an info filename (arg 4), no
      % printed manual name (arg 5).  This is essentially the same as
      % the case above; we output the filename, since we have nothing else.
      %
      \crossmanualxref{\code{\infofilename\unskip}}%
    %
    \else
      % Reference within this manual.
      %
      % Only output a following space if the -snt ref is nonempty; for
      % @unnumbered and @anchor, it won't be.
      \setbox2 = \hbox{\ignorespaces \refx{#1-snt}{}}%
      \ifdim \wd2 > 0pt \refx{#1-snt}\space\fi
      %
      % output the `[mynode]' via the macro below so it can be overridden.
      \xrefprintnodename\printedrefname
      %
      \expandafter\ifx\csname SETtxiomitxrefpg\endcsname\relax
        % But we always want a comma and a space:
        ,\space
        %
        % output the `page 3'.
        \turnoffactive \putwordpage\tie\refx{#1-pg}{}%
        % Add a , if xref followed by a space
        \if\space\noexpand\tokenafterxref ,%
        \else\ifx\	\tokenafterxref ,%    @TAB
        \else\ifx\*\tokenafterxref ,%   @*
        \else\ifx\ \tokenafterxref ,%   @SPACE
        \else\ifx\
                  \tokenafterxref ,%    @NL
        \else\ifx\tie\tokenafterxref ,% @tie
        \fi\fi\fi\fi\fi\fi
      \fi
    \fi\fi
  \fi
  \endlink
\endgroup}

% Output a cross-manual xref to #1.  Used just above (twice).
%
% Only include the text "Section ``foo'' in" if the foo is neither
% missing nor Top.  Thus, @xref{,,,foo,The Foo Manual} outputs simply
% "see The Foo Manual", the idea being to refer to the whole manual.
%
% But, this being TeX, we can't easily compare our node name against the
% string "Top" while ignoring the possible spaces before and after in
% the input.  By adding the arbitrary 7sp below, we make it much less
% likely that a real node name would have the same width as "Top" (e.g.,
% in a monospaced font).  Hopefully it will never happen in practice.
%
% For the same basic reason, we retypeset the "Top" at every
% reference, since the current font is indeterminate.
%
\def\crossmanualxref#1{%
  \setbox\toprefbox = \hbox{Top\kern7sp}%
  \setbox2 = \hbox{\ignorespaces \printedrefname \unskip \kern7sp}%
  \ifdim \wd2 > 7sp % nonempty?
    \ifdim \wd2 = \wd\toprefbox \else  % same as Top?
      \putwordSection{} ``\printedrefname'' \putwordin{}\space
    \fi
  \fi
  #1%
}

% This macro is called from \xrefX for the `[nodename]' part of xref
% output.  It's a separate macro only so it can be changed more easily,
% since square brackets don't work well in some documents.  Particularly
% one that Bob is working on :).
%
\def\xrefprintnodename#1{[#1]}

% Things referred to by \setref.
%
% Expansions for the SNT ("section number and type") part of a
% cross-reference; \setref writes \csname SNT\endcsname as NAME-snt.
\def\Ynothing{}
\def\Yomitfromtoc{}
\def\Ynumbered{%
  \ifnum\secno=0
    \putwordChapter@tie \the\chapno
  \else \ifnum\subsecno=0
    \putwordSection@tie \the\chapno.\the\secno
  \else \ifnum\subsubsecno=0
    \putwordSection@tie \the\chapno.\the\secno.\the\subsecno
  \else
    \putwordSection@tie \the\chapno.\the\secno.\the\subsecno.\the\subsubsecno
  \fi\fi\fi
}
\def\Yappendix{%
  \ifnum\secno=0
     \putwordAppendix@tie @char\the\appendixno{}%
  \else \ifnum\subsecno=0
     \putwordSection@tie @char\the\appendixno.\the\secno
  \else \ifnum\subsubsecno=0
    \putwordSection@tie @char\the\appendixno.\the\secno.\the\subsecno
  \else
    \putwordSection@tie
      @char\the\appendixno.\the\secno.\the\subsecno.\the\subsubsecno
  \fi\fi\fi
}

% \refx{NAME}{SUFFIX} - reference a cross-reference string named NAME.  SUFFIX
% is output afterwards if non-empty.
\def\refx#1#2{%
  \requireauxfile
  {%
    \indexnofonts
    \turnoffactive
    \def\value##1{##1}%
    \expandafter\global\expandafter\let\expandafter\thisrefX
      \csname XR#1\endcsname
  }%
  \ifx\thisrefX\relax
    % If not defined, say something at least.
    \angleleft un\-de\-fined\angleright
    \iflinks
      \ifhavexrefs
        {\toks0 = {#1}% avoid expansion of possibly-complex value
         \message{\linenumber Undefined cross reference `\the\toks0'.}}%
      \else
        \ifwarnedxrefs\else
          \global\warnedxrefstrue
          \message{Cross reference values unknown; you must run TeX again.}%
        \fi
      \fi
    \fi
  \else
    % It's defined, so just use it.
    \thisrefX
  \fi
  #2% Output the suffix in any case.
}

% This is the macro invoked by entries in the aux file.  Define a control
% sequence for a cross-reference target (we prepend XR to the control sequence
% name to avoid collisions).  The value is the page number.  If this is a float
% type, we have more work to do.
%
\def\xrdef#1#2{%
  {% Expand the node or anchor name to remove control sequences.
   % \turnoffactive stops 8-bit characters being changed to commands
   % like @'e.  \refx does the same to retrieve the value in the definition.
   \indexnofonts
   \turnoffactive
   \def\value##1{##1}%
   \xdef\safexrefname{#1}%
  }%
  %
  \bgroup
    \expandafter\gdef\csname XR\safexrefname\endcsname{#2}%
  \egroup
  % We put the \gdef inside a group to avoid the definitions building up on
  % TeX's save stack, which can cause it to run out of space for aux files with
  % thousands of lines.  \gdef doesn't use the save stack, but \csname does
  % when it defines an unknown control sequence as \relax.
  %
  % Was that xref control sequence that we just defined for a float?
  \expandafter\iffloat\csname XR\safexrefname\endcsname
    % it was a float, and we have the (safe) float type in \iffloattype.
    \expandafter\let\expandafter\floatlist
      \csname floatlist\iffloattype\endcsname
    %
    % Is this the first time we've seen this float type?
    \expandafter\ifx\floatlist\relax
      \toks0 = {\do}% yes, so just \do
    \else
      % had it before, so preserve previous elements in list.
      \toks0 = \expandafter{\floatlist\do}%
    \fi
    %
    % Remember this xref in the control sequence \floatlistFLOATTYPE,
    % for later use in \listoffloats.
    \expandafter\xdef\csname floatlist\iffloattype\endcsname{\the\toks0
      {\safexrefname}}%
  \fi
}

% If working on a large document in chapters, it is convenient to
% be able to disable indexing, cross-referencing, and contents, for test runs.
% This is done with @novalidate at the beginning of the file.
%
\newif\iflinks \linkstrue % by default we want the aux files.
\let\novalidate = \linksfalse

% Used when writing to the aux file, or when using data from it.
\def\requireauxfile{%
  \iflinks
    \tryauxfile
    % Open the new aux file.  TeX will close it automatically at exit.
    \immediate\openout\auxfile=\jobname.aux
  \fi
  \global\let\requireauxfile=\relax % Only do this once.
}

% Read the last existing aux file, if any.  No error if none exists.
%
\def\tryauxfile{%
  \openin 1 \jobname.aux
  \ifeof 1 \else
    \readdatafile{aux}%
    \global\havexrefstrue
  \fi
  \closein 1
}

% Make control characters and special characters harmless (catcode
% \other) while reading data files back in, so saved names survive
% verbatim.
\def\setupdatafile{%
  \catcode`\^^@=\other
  \catcode`\^^A=\other
  \catcode`\^^B=\other
  \catcode`\^^C=\other
  \catcode`\^^D=\other
  \catcode`\^^E=\other
  \catcode`\^^F=\other
  \catcode`\^^G=\other
  \catcode`\^^H=\other
  \catcode`\^^K=\other
  \catcode`\^^L=\other
  \catcode`\^^N=\other
  \catcode`\^^P=\other
  \catcode`\^^Q=\other
  \catcode`\^^R=\other
  \catcode`\^^S=\other
  \catcode`\^^T=\other
  \catcode`\^^U=\other
  \catcode`\^^V=\other
  \catcode`\^^W=\other
  \catcode`\^^X=\other
  \catcode`\^^Z=\other
  \catcode`\^^[=\other
  \catcode`\^^\=\other
  \catcode`\^^]=\other
  \catcode`\^^^=\other
  \catcode`\^^_=\other
  \catcode`\^=\other
  %
  % Special characters.  Should be turned off anyway, but...
  \catcode`\~=\other
  \catcode`\[=\other
  \catcode`\]=\other
  \catcode`\"=\other
  \catcode`\_=\other
  \catcode`\|=\other
  \catcode`\<=\other
  \catcode`\>=\other
  \catcode`\$=\other
  \catcode`\#=\other
  \catcode`\&=\other
  \catcode`\%=\other
  \catcode`+=\other % avoid \+ for paranoia even though we've turned it off
  %
  \catcode`\\=\active
  %
  % @ is our escape character in .aux files, and we need braces.
  \catcode`\{=1
  \catcode`\}=2
  \catcode`\@=0
}

\def\readdatafile#1{%
\begingroup
  \setupdatafile
  \input\jobname.#1
\endgroup}


\message{insertions,}
% including footnotes.

\newcount \footnoteno

% The trailing space in the following definition for supereject is
% vital for proper filling; pages come out unaligned when you do a
% pagealignmacro call if that space before the closing brace is
% removed. (Generally, numeric constants should always be followed by a
% space to prevent strange expansion errors.)
\def\supereject{\par\penalty -20000\footnoteno =0 }

% @footnotestyle is meaningful for Info output only.
\let\footnotestyle=\comment

{\catcode `\@=11
%
% Auto-number footnotes.  Otherwise like plain.
\gdef\footnote{%
  \global\advance\footnoteno by \@ne
  \edef\thisfootno{$^{\the\footnoteno}$}%
  %
  % In case the footnote comes at the end of a sentence, preserve the
  % extra spacing after we do the footnote number.
  \let\@sf\empty
  \ifhmode\edef\@sf{\spacefactor\the\spacefactor}\ptexslash\fi
  %
  % Remove inadvertent blank space before typesetting the footnote number.
  \unskip
  \thisfootno\@sf
  \dofootnote
}%

% Don't bother with the trickery in plain.tex to not require the
% footnote text as a parameter.  Our footnotes don't need to be so general.
%
% Oh yes, they do; otherwise, @ifset (and anything else that uses
% \parseargline) fails inside footnotes because the tokens are fixed when
% the footnote is read.  --karl, 16nov96.
%
\gdef\dofootnote{%
  \insert\footins\bgroup
  %
  % Nested footnotes are not supported in TeX, that would take a lot
  % more work.  (\startsavinginserts does not suffice.)
  \let\footnote=\errfootnotenest
  %
  % We want to typeset this text as a normal paragraph, even if the
  % footnote reference occurs in (for example) a display environment.
  % So reset some parameters.
  \hsize=\txipagewidth
  \interlinepenalty\interfootnotelinepenalty
  \splittopskip\ht\strutbox % top baseline for broken footnotes
  \splitmaxdepth\dp\strutbox
  \floatingpenalty\@MM
  \leftskip\z@skip
  \rightskip\z@skip
  \spaceskip\z@skip
  \xspaceskip\z@skip
  \parindent\defaultparindent
  %
  \smallfonts \rm
  %
  % Because we use hanging indentation in footnotes, a @noindent appears
  % to exdent this text, so make it be a no-op.  makeinfo does not use
  % hanging indentation so @noindent can still be needed within footnote
  % text after an @example or the like (not that this is good style).
  \let\noindent = \relax
  %
  % Hang the footnote text off the number.  Use \everypar in case the
  % footnote extends for more than one paragraph.
  \everypar = {\hang}%
  \textindent{\thisfootno}%
  %
  % Don't crash into the line above the footnote text.  Since this
  % expands into a box, it must come within the paragraph, lest it
  % provide a place where TeX can split the footnote.
  \footstrut
  %
  % Invoke rest of plain TeX footnote routine.
  \futurelet\next\fo@t
}
}%end \catcode `\@=11

\def\errfootnotenest{%
  \errhelp=\EMsimple
  \errmessage{Nested footnotes not supported in texinfo.tex,
    even though they work in makeinfo; sorry}
}

\def\errfootnoteheading{%
  \errhelp=\EMsimple
  \errmessage{Footnotes in chapters, sections, etc., are not supported}
}

% In case a @footnote appears in a vbox, save the footnote text and create
% the real \insert just after the vbox finished.  Otherwise, the insertion
% would be lost.
% Similarly, if a @footnote appears inside an alignment, save the footnote
% text to a box and make the \insert when a row of the table is finished.
% And the same can be done for other insert classes.  --kasal, 16nov03.
%
% Replace the \insert primitive by a cheating macro.
% Deeper inside, just make sure that the saved insertions are not spilled
% out prematurely.
%
\def\startsavinginserts{%
  \ifx \insert\ptexinsert
    \let\insert\saveinsert
  \else
    \let\checkinserts\relax
  \fi
}

% This \insert replacement works for both \insert\footins{foo} and
% \insert\footins\bgroup foo\egroup, but it doesn't work for \insert27{foo}.
%
\def\saveinsert#1{%
  \edef\next{\noexpand\savetobox \makeSAVEname#1}%
  \afterassignment\next
  % swallow the left brace
  \let\temp =
}
\def\makeSAVEname#1{\makecsname{SAVE\expandafter\gobble\string#1}}
\def\savetobox#1{\global\setbox#1 = \vbox\bgroup \unvbox#1}

\def\checksaveins#1{\ifvoid#1\else \placesaveins#1\fi}

% Hand a saved insertion box back to the real \insert primitive,
% stripping the SAVE prefix from the box's name to recover the
% original insertion class.
\def\placesaveins#1{%
  \ptexinsert \csname\expandafter\gobblesave\string#1\endcsname
    {\box#1}%
}

% eat @SAVE -- beware, all of them have catcode \other:
{
  \def\dospecials{\do S\do A\do V\do E} \uncatcodespecials  %  ;-)
  \gdef\gobblesave @SAVE{}
}

% initialization:
\def\newsaveins #1{%
  \edef\next{\noexpand\newsaveinsX \makeSAVEname#1}%
  \next
}
\def\newsaveinsX #1{%
  \csname newbox\endcsname #1%
  \expandafter\def\expandafter\checkinserts\expandafter{\checkinserts
    \checksaveins #1}%
}

% initialize:
\let\checkinserts\empty
\newsaveins\footins
\newsaveins\margin


% @image.  We use the macros from epsf.tex to support this.
% If epsf.tex is not installed and @image is used, we complain.
%
% Check for and read epsf.tex up front.  If we read it only at @image
% time, we might be inside a group, and then its definitions would get
% undone and the next image would fail.
\openin 1 = epsf.tex
\ifeof 1 \else
  % Do not bother showing banner with epsf.tex v2.7k (available in
  % doc/epsf.tex and on ctan).
  \def\epsfannounce{\toks0 = }%
  \input epsf.tex
\fi
\closein 1
%
% We will only complain once about lack of epsf.tex.
\newif\ifwarnednoepsf
\newhelp\noepsfhelp{epsf.tex must be installed for images to work.  It is
  also included in the Texinfo distribution, or you can get it from
  https://ctan.org/texarchive/macros/texinfo/texinfo/doc/epsf.tex.}
%
\def\image#1{%
  \ifx\epsfbox\thisisundefined
    \ifwarnednoepsf \else
      \errhelp = \noepsfhelp
      \errmessage{epsf.tex not found, images will be ignored}%
      \global\warnednoepsftrue
    \fi
  \else
    \imagexxx #1,,,,,\finish
  \fi
}
%
% Arguments to @image:
% #1 is (mandatory) image filename; we tack on .eps extension.
% #2 is (optional) width, #3 is (optional) height.
% #4 is (ignored optional) html alt text.
% #5 is (ignored optional) extension.
% #6 is just the usual extra ignored arg for parsing stuff.
\newif\ifimagevmode
\def\imagexxx#1,#2,#3,#4,#5,#6\finish{\begingroup
  \catcode`\^^M = 5     % in case we're inside an example
  \normalturnoffactive  % allow _ et al. in names
  \makevalueexpandable
  % If the image is by itself, center it.
  \ifvmode
    \imagevmodetrue
  \else \ifx\centersub\centerV
    % for @center @image, we need a vbox so we can have our vertical space
    \imagevmodetrue
    \vbox\bgroup % vbox has better behavior than vtop here
  \fi\fi
  %
  \ifimagevmode
    \nobreak\medskip
    % Usually we'll have text after the image which will insert
    % \parskip glue, so insert it here too to equalize the space
    % above and below.
    \nobreak\vskip\parskip
    \nobreak
  \fi
  %
  % Leave vertical mode so that indentation from an enclosing
  %  environment such as @quotation is respected.
  % However, if we're at the top level, we don't want the
  %  normal paragraph indentation.
  % On the other hand, if we are in the case of @center @image, we don't
  %  want to start a paragraph, which will create a hsize-width box and
  %  eradicate the centering.
  \ifx\centersub\centerV\else \noindent \fi
  %
  % Output the image.
  \ifpdf % For pdfTeX and LuaTeX <= 0.80
    \dopdfimage{#1}{#2}{#3}%
  \else
    \ifx\XeTeXrevision\thisisundefined
      % For epsf.tex
      % \epsfbox itself resets \epsf?size at each figure.
      \setbox0 = \hbox{\ignorespaces #2}%
      \ifdim\wd0 > 0pt \epsfxsize=#2\relax \fi
      \setbox0 = \hbox{\ignorespaces #3}%
      \ifdim\wd0 > 0pt \epsfysize=#3\relax \fi
      \epsfbox{#1.eps}%
    \else
      % For XeTeX
      \doxeteximage{#1}{#2}{#3}%
    \fi
  \fi
  %
  \ifimagevmode
    \medskip  % space after a standalone image
  \fi
  \ifx\centersub\centerV \egroup \fi
\endgroup}

% @float FLOATTYPE,LABEL,LOC ... @end float for displayed figures, tables,
% etc.  We don't actually implement floating yet, we always include the
% float "here".  But it seemed the best name for the future.
%
\envparseargdef\float{\eatcommaspace\eatcommaspace\dofloat#1, , ,\finish}

% There may be a space before second and/or third parameter; delete it.
\def\eatcommaspace#1, {#1,}

% #1 is the optional FLOATTYPE, the text label for this float, typically
% "Figure", "Table", "Example", etc.  Can't contain commas.  If omitted,
% this float will not be numbered and cannot be referred to.
%
% #2 is the optional xref label.  Also must be present for the float to
% be referable.
%
% #3 is the optional positioning argument; for now, it is ignored.  It
% will somehow specify the positions allowed to float to (here, top, bottom).
%
% We keep a separate counter for each FLOATTYPE, which we reset at each
% chapter-level command.
\let\resetallfloatnos=\empty
%
\def\dofloat#1,#2,#3,#4\finish{%
  \let\thiscaption=\empty
  \let\thisshortcaption=\empty
  %
  % don't lose footnotes inside @float.
  %
  % BEWARE: when the floats start float, we have to issue warning whenever an
  % insert appears inside a float which could possibly float. --kasal, 26may04
  %
  \startsavinginserts
  %
  % We can't be used inside a paragraph.
  \par
  %
  \vtop\bgroup
    \def\floattype{#1}%
    \def\floatlabel{#2}%
    \def\floatloc{#3}% we do nothing with this yet.
    %
    \ifx\floattype\empty
      \let\safefloattype=\empty
    \else
      {%
        % the floattype might have accents or other special characters,
        % but we need to use it in a control sequence name.
        \indexnofonts
        \turnoffactive
        \xdef\safefloattype{\floattype}%
      }%
    \fi
    %
    % If label is given but no type, we handle that as the empty type.
    \ifx\floatlabel\empty \else
      % We want each FLOATTYPE to be numbered separately (Figure 1,
      % Table 1, Figure 2, ...).  (And if no label, no number.)
      %
      \expandafter\getfloatno\csname\safefloattype floatno\endcsname
      \global\advance\floatno by 1
      %
      {%
        % This magic value for \currentsection is output by \setref as the
        % XREFLABEL-title value.  \xrefX uses it to distinguish float
        % labels (which have a completely different output format) from
        % node and anchor labels.  And \xrdef uses it to construct the
        % lists of floats.
        %
        \edef\currentsection{\floatmagic=\safefloattype}%
        \setref{\floatlabel}{Yfloat}%
      }%
    \fi
    %
    % start with \parskip glue, I guess.
    \vskip\parskip
    %
    % Don't suppress indentation if a float happens to start a section.
    \restorefirstparagraphindent
}

% we have these possibilities:
% @float Foo,lbl & @caption{Cap}: Foo 1.1: Cap
% @float Foo,lbl & no caption:    Foo 1.1
% @float Foo & @caption{Cap}:     Foo: Cap
% @float Foo & no caption:        Foo
% @float ,lbl & Caption{Cap}:     1.1: Cap
% @float ,lbl & no caption:       1.1
% @float & @caption{Cap}:         Cap
% @float & no caption:
%
\def\Efloat{%
    \let\floatident = \empty
    %
    % In all cases, if we have a float type, it comes first.
    \ifx\floattype\empty \else \def\floatident{\floattype}\fi
    %
    % If we have an xref label, the number comes next.
    \ifx\floatlabel\empty \else
      \ifx\floattype\empty \else % if also had float type, need tie first.
        \appendtomacro\floatident{\tie}%
      \fi
      % the number.
      \appendtomacro\floatident{\chaplevelprefix\the\floatno}%
    \fi
    %
    % Start the printed caption with what we've constructed in
    % \floatident, but keep it separate; we need \floatident again.
    \let\captionline = \floatident
    %
    \ifx\thiscaption\empty \else
      \ifx\floatident\empty \else
        \appendtomacro\captionline{: }% had ident, so need a colon between
      \fi
      %
      % caption text.
      \appendtomacro\captionline{\scanexp\thiscaption}%
    \fi
    %
    % If we have anything to print, print it, with space before.
    % Eventually this needs to become an \insert.
    \ifx\captionline\empty \else
      \vskip.5\parskip
      \captionline
      %
      % Space below caption.
      \vskip\parskip
    \fi
    %
    % If have an xref label, write the list of floats info.  Do this
    % after the caption, to avoid chance of it being a breakpoint.
    \ifx\floatlabel\empty \else
      % Write the text that goes in the lof to the aux file as
      % \floatlabel-lof.  Besides \floatident, we include the short
      % caption if specified, else the full caption if specified, else nothing.
      {%
        \requireauxfile
        \atdummies
        %
        \ifx\thisshortcaption\empty
          \def\gtemp{\thiscaption}%
        \else
          \def\gtemp{\thisshortcaption}%
        \fi
        \immediate\write\auxfile{@xrdef{\floatlabel-lof}{\floatident
          \ifx\gtemp\empty \else : \gtemp \fi}}%
      }%
    \fi
  \egroup  % end of \vtop
  %
  \checkinserts
}

% Append the tokens #2 to the definition of macro #1, not expanding either.
%
\def\appendtomacro#1#2{%
  \expandafter\def\expandafter#1\expandafter{#1#2}%
}

% @caption, @shortcaption
%
\def\caption{\docaption\thiscaption}
\def\shortcaption{\docaption\thisshortcaption}
\def\docaption{\checkenv\float \bgroup\scanargctxt\defcaption}
\def\defcaption#1#2{\egroup \def#1{#2}}

% The parameter is the control sequence identifying the counter we are
% going to use.  Create it if it doesn't exist and assign it to \floatno.
\def\getfloatno#1{%
  \ifx#1\relax
      % Haven't seen this figure type before.
      \csname newcount\endcsname #1%
      %
      % Remember to reset this floatno at the next chap.
      \expandafter\gdef\expandafter\resetallfloatnos
        \expandafter{\resetallfloatnos #1=0 }%
  \fi
  \let\floatno#1%
}

% \setref calls this to get the XREFLABEL-snt value.  We want an @xref
% to the FLOATLABEL to expand to "Figure 3.1".  We call \setref when we
% first read the @float command.
%
\def\Yfloat{\floattype@tie \chaplevelprefix\the\floatno}%

% Magic string used for the XREFLABEL-title value, so \xrefX can
% distinguish floats from other xref types.
\def\floatmagic{!!float!!}

% #1 is the control sequence we are passed; we expand into a conditional
% which is true if #1 represents a float ref.  That is, the magic
% \currentsection value which we \setref above.
%
\def\iffloat#1{\expandafter\doiffloat#1==\finish}
%
% #1 is (maybe) the \floatmagic string.  If so, #2 will be the
% (safe) float type for this float.  We set \iffloattype to #2.
%
\def\doiffloat#1=#2=#3\finish{%
  \def\temp{#1}%
  \def\iffloattype{#2}%
  \ifx\temp\floatmagic
}

% @listoffloats FLOATTYPE - print a list of floats like a table of contents.
% \parseargdef\listoffloats{% \def\floattype{#1}% floattype {% % the floattype might have accents or other special characters, % but we need to use it in a control sequence name. \indexnofonts \turnoffactive \xdef\safefloattype{\floattype}% }% % % \xrdef saves the floats as a \do-list in \floatlistSAFEFLOATTYPE. \expandafter\ifx\csname floatlist\safefloattype\endcsname \relax \ifhavexrefs % if the user said @listoffloats foo but never @float foo. \message{\linenumber No `\safefloattype' floats to list.}% \fi \else \begingroup \leftskip=\tocindent % indent these entries like a toc \let\do=\listoffloatsdo \csname floatlist\safefloattype\endcsname \endgroup \fi } % This is called on each entry in a list of floats. We're passed the % xref label, in the form LABEL-title, which is how we save it in the % aux file. We strip off the -title and look up \XRLABEL-lof, which % has the text we're supposed to typeset here. % % Figures without xref labels will not be included in the list (since % they won't appear in the aux file). % \def\listoffloatsdo#1{\listoffloatsdoentry#1\finish} \def\listoffloatsdoentry#1-title\finish{{% % Can't fully expand XR#1-lof because it can contain anything. Just % pass the control sequence. On the other hand, XR#1-pg is just the % page number, and we want to fully expand that so we can get a link % in pdf output. \toksA = \expandafter{\csname XR#1-lof\endcsname}% % % use the same \entry macro we use to generate the TOC and index. \edef\writeentry{\noexpand\entry{\the\toksA}{\csname XR#1-pg\endcsname}}% \writeentry }} \message{localization,} % For single-language documents, @documentlanguage is usually given very % early, just after @documentencoding. Single argument is the language % (de) or locale (de_DE) abbreviation. % { \catcode`\_ = \active \globaldefs=1 \parseargdef\documentlanguage{% \tex % read txi-??.tex file in plain TeX. % Read the file by the name they passed if it exists. 
\let_ = \normalunderscore % normal _ character for filename test \openin 1 txi-#1.tex \ifeof 1 \documentlanguagetrywithoutunderscore #1_\finish \else \globaldefs = 1 % everything in the txi-LL files needs to persist \input txi-#1.tex \fi \closein 1 \endgroup % end raw TeX } % % If they passed de_DE, and txi-de_DE.tex doesn't exist, % try txi-de.tex. % \gdef\documentlanguagetrywithoutunderscore#1_#2\finish{% \openin 1 txi-#1.tex \ifeof 1 \errhelp = \nolanghelp \errmessage{Cannot read language file txi-#1.tex}% \else \globaldefs = 1 % everything in the txi-LL files needs to persist \input txi-#1.tex \fi \closein 1 } }% end of special _ catcode % \newhelp\nolanghelp{The given language definition file cannot be found or is empty. Maybe you need to install it? Putting it in the current directory should work if nowhere else does.} % This macro is called from txi-??.tex files; the first argument is the % \language name to set (without the "\lang@" prefix), the second and % third args are \{left,right}hyphenmin. % % The language names to pass are determined when the format is built. % See the etex.log file created at that time, e.g., % /usr/local/texlive/2008/texmf-var/web2c/pdftex/etex.log. % % With TeX Live 2008, etex now includes hyphenation patterns for all % available languages. This means we can support hyphenation in % Texinfo, at least to some extent. (This still doesn't solve the % accented characters problem.) % \catcode`@=11 \def\txisetlanguage#1#2#3{% % do not set the language if the name is undefined in the current TeX. \expandafter\ifx\csname lang@#1\endcsname \relax \message{no patterns for #1}% \else \global\language = \csname lang@#1\endcsname \fi % but there is no harm in adjusting the hyphenmin values regardless. \global\lefthyphenmin = #2\relax \global\righthyphenmin = #3\relax } % XeTeX and LuaTeX can handle Unicode natively. % Their default I/O uses UTF-8 sequences instead of a byte-wise operation. % Other TeX engines' I/O (pdfTeX, etc.) is byte-wise. 
% \newif\iftxinativeunicodecapable \newif\iftxiusebytewiseio \ifx\XeTeXrevision\thisisundefined \ifx\luatexversion\thisisundefined \txinativeunicodecapablefalse \txiusebytewiseiotrue \else \txinativeunicodecapabletrue \txiusebytewiseiofalse \fi \else \txinativeunicodecapabletrue \txiusebytewiseiofalse \fi % Set I/O by bytes instead of UTF-8 sequence for XeTeX and LuaTex % for non-UTF-8 (byte-wise) encodings. % \def\setbytewiseio{% \ifx\XeTeXrevision\thisisundefined \else \XeTeXdefaultencoding "bytes" % For subsequent files to be read \XeTeXinputencoding "bytes" % For document root file % Unfortunately, there seems to be no corresponding XeTeX command for % output encoding. This is a problem for auxiliary index and TOC files. % The only solution would be perhaps to write out @U{...} sequences in % place of non-ASCII characters. \fi \ifx\luatexversion\thisisundefined \else \directlua{ local utf8_char, byte, gsub = unicode.utf8.char, string.byte, string.gsub local function convert_char (char) return utf8_char(byte(char)) end local function convert_line (line) return gsub(line, ".", convert_char) end callback.register("process_input_buffer", convert_line) local function convert_line_out (line) local line_out = "" for c in string.utfvalues(line) do line_out = line_out .. string.char(c) end return line_out end callback.register("process_output_buffer", convert_line_out) } \fi \txiusebytewiseiotrue } % Helpers for encodings. % Set the catcode of characters 128 through 255 to the specified number. % \def\setnonasciicharscatcode#1{% \count255=128 \loop\ifnum\count255<256 \global\catcode\count255=#1\relax \advance\count255 by 1 \repeat } \def\setnonasciicharscatcodenonglobal#1{% \count255=128 \loop\ifnum\count255<256 \catcode\count255=#1\relax \advance\count255 by 1 \repeat } % @documentencoding sets the definition of non-ASCII characters % according to the specified encoding. 
% \def\documentencoding{\parseargusing\filenamecatcodes\documentencodingzzz} \def\documentencodingzzz#1{% % % Encoding being declared for the document. \def\declaredencoding{\csname #1.enc\endcsname}% % % Supported encodings: names converted to tokens in order to be able % to compare them with \ifx. \def\ascii{\csname US-ASCII.enc\endcsname}% \def\latnine{\csname ISO-8859-15.enc\endcsname}% \def\latone{\csname ISO-8859-1.enc\endcsname}% \def\lattwo{\csname ISO-8859-2.enc\endcsname}% \def\utfeight{\csname UTF-8.enc\endcsname}% % \ifx \declaredencoding \ascii \asciichardefs % \else \ifx \declaredencoding \lattwo \iftxinativeunicodecapable \setbytewiseio \fi \setnonasciicharscatcode\active \lattwochardefs % \else \ifx \declaredencoding \latone \iftxinativeunicodecapable \setbytewiseio \fi \setnonasciicharscatcode\active \latonechardefs % \else \ifx \declaredencoding \latnine \iftxinativeunicodecapable \setbytewiseio \fi \setnonasciicharscatcode\active \latninechardefs % \else \ifx \declaredencoding \utfeight \iftxinativeunicodecapable % For native Unicode handling (XeTeX and LuaTeX) \nativeunicodechardefs \else % For treating UTF-8 as byte sequences (TeX, eTeX and pdfTeX) \setnonasciicharscatcode\active % since we already invoked \utfeightchardefs at the top level % (below), do not re-invoke it, otherwise our check for duplicated % definitions gets triggered. Making non-ascii chars active is % sufficient. \fi % \else \message{Ignoring unknown document encoding: #1.}% % \fi % utfeight \fi % latnine \fi % latone \fi % lattwo \fi % ascii % \ifx\XeTeXrevision\thisisundefined \else \ifx \declaredencoding \utfeight \else \ifx \declaredencoding \ascii \else \message{Warning: XeTeX with non-UTF-8 encodings cannot handle % non-ASCII characters in auxiliary files.}% \fi \fi \fi } % emacs-page % A message to be logged when using a character that isn't available % the default font encoding (OT1). 
% \def\missingcharmsg#1{\message{Character missing, sorry: #1.}} % Take account of \c (plain) vs. \, (Texinfo) difference. \def\cedilla#1{\ifx\c\ptexc\c{#1}\else\,{#1}\fi} % First, make active non-ASCII characters in order for them to be % correctly categorized when TeX reads the replacement text of % macros containing the character definitions. \setnonasciicharscatcode\active % \def\gdefchar#1#2{% \gdef#1{% \ifpassthroughchars \string#1% \else #2% \fi }} % Latin1 (ISO-8859-1) character definitions. \def\latonechardefs{% \gdefchar^^a0{\tie} \gdefchar^^a1{\exclamdown} \gdefchar^^a2{{\tcfont \char162}} % cent \gdefchar^^a3{\pounds{}} \gdefchar^^a4{{\tcfont \char164}} % currency \gdefchar^^a5{{\tcfont \char165}} % yen \gdefchar^^a6{{\tcfont \char166}} % broken bar \gdefchar^^a7{\S} \gdefchar^^a8{\"{}} \gdefchar^^a9{\copyright{}} \gdefchar^^aa{\ordf} \gdefchar^^ab{\guillemetleft{}} \gdefchar^^ac{\ensuremath\lnot} \gdefchar^^ad{\-} \gdefchar^^ae{\registeredsymbol{}} \gdefchar^^af{\={}} % \gdefchar^^b0{\textdegree} \gdefchar^^b1{$\pm$} \gdefchar^^b2{$^2$} \gdefchar^^b3{$^3$} \gdefchar^^b4{\'{}} \gdefchar^^b5{$\mu$} \gdefchar^^b6{\P} \gdefchar^^b7{\ensuremath\cdot} \gdefchar^^b8{\cedilla\ } \gdefchar^^b9{$^1$} \gdefchar^^ba{\ordm} \gdefchar^^bb{\guillemetright{}} \gdefchar^^bc{$1\over4$} \gdefchar^^bd{$1\over2$} \gdefchar^^be{$3\over4$} \gdefchar^^bf{\questiondown} % \gdefchar^^c0{\`A} \gdefchar^^c1{\'A} \gdefchar^^c2{\^A} \gdefchar^^c3{\~A} \gdefchar^^c4{\"A} \gdefchar^^c5{\ringaccent A} \gdefchar^^c6{\AE} \gdefchar^^c7{\cedilla C} \gdefchar^^c8{\`E} \gdefchar^^c9{\'E} \gdefchar^^ca{\^E} \gdefchar^^cb{\"E} \gdefchar^^cc{\`I} \gdefchar^^cd{\'I} \gdefchar^^ce{\^I} \gdefchar^^cf{\"I} % \gdefchar^^d0{\DH} \gdefchar^^d1{\~N} \gdefchar^^d2{\`O} \gdefchar^^d3{\'O} \gdefchar^^d4{\^O} \gdefchar^^d5{\~O} \gdefchar^^d6{\"O} \gdefchar^^d7{$\times$} \gdefchar^^d8{\O} \gdefchar^^d9{\`U} \gdefchar^^da{\'U} \gdefchar^^db{\^U} \gdefchar^^dc{\"U} \gdefchar^^dd{\'Y} \gdefchar^^de{\TH} 
\gdefchar^^df{\ss} % \gdefchar^^e0{\`a} \gdefchar^^e1{\'a} \gdefchar^^e2{\^a} \gdefchar^^e3{\~a} \gdefchar^^e4{\"a} \gdefchar^^e5{\ringaccent a} \gdefchar^^e6{\ae} \gdefchar^^e7{\cedilla c} \gdefchar^^e8{\`e} \gdefchar^^e9{\'e} \gdefchar^^ea{\^e} \gdefchar^^eb{\"e} \gdefchar^^ec{\`{\dotless i}} \gdefchar^^ed{\'{\dotless i}} \gdefchar^^ee{\^{\dotless i}} \gdefchar^^ef{\"{\dotless i}} % \gdefchar^^f0{\dh} \gdefchar^^f1{\~n} \gdefchar^^f2{\`o} \gdefchar^^f3{\'o} \gdefchar^^f4{\^o} \gdefchar^^f5{\~o} \gdefchar^^f6{\"o} \gdefchar^^f7{$\div$} \gdefchar^^f8{\o} \gdefchar^^f9{\`u} \gdefchar^^fa{\'u} \gdefchar^^fb{\^u} \gdefchar^^fc{\"u} \gdefchar^^fd{\'y} \gdefchar^^fe{\th} \gdefchar^^ff{\"y} } % Latin9 (ISO-8859-15) encoding character definitions. \def\latninechardefs{% % Encoding is almost identical to Latin1. \latonechardefs % \gdefchar^^a4{\euro{}} \gdefchar^^a6{\v S} \gdefchar^^a8{\v s} \gdefchar^^b4{\v Z} \gdefchar^^b8{\v z} \gdefchar^^bc{\OE} \gdefchar^^bd{\oe} \gdefchar^^be{\"Y} } % Latin2 (ISO-8859-2) character definitions. 
\def\lattwochardefs{% \gdefchar^^a0{\tie} \gdefchar^^a1{\ogonek{A}} \gdefchar^^a2{\u{}} \gdefchar^^a3{\L} \gdefchar^^a4{\missingcharmsg{CURRENCY SIGN}} \gdefchar^^a5{\v L} \gdefchar^^a6{\'S} \gdefchar^^a7{\S} \gdefchar^^a8{\"{}} \gdefchar^^a9{\v S} \gdefchar^^aa{\cedilla S} \gdefchar^^ab{\v T} \gdefchar^^ac{\'Z} \gdefchar^^ad{\-} \gdefchar^^ae{\v Z} \gdefchar^^af{\dotaccent Z} % \gdefchar^^b0{\textdegree{}} \gdefchar^^b1{\ogonek{a}} \gdefchar^^b2{\ogonek{ }} \gdefchar^^b3{\l} \gdefchar^^b4{\'{}} \gdefchar^^b5{\v l} \gdefchar^^b6{\'s} \gdefchar^^b7{\v{}} \gdefchar^^b8{\cedilla\ } \gdefchar^^b9{\v s} \gdefchar^^ba{\cedilla s} \gdefchar^^bb{\v t} \gdefchar^^bc{\'z} \gdefchar^^bd{\H{}} \gdefchar^^be{\v z} \gdefchar^^bf{\dotaccent z} % \gdefchar^^c0{\'R} \gdefchar^^c1{\'A} \gdefchar^^c2{\^A} \gdefchar^^c3{\u A} \gdefchar^^c4{\"A} \gdefchar^^c5{\'L} \gdefchar^^c6{\'C} \gdefchar^^c7{\cedilla C} \gdefchar^^c8{\v C} \gdefchar^^c9{\'E} \gdefchar^^ca{\ogonek{E}} \gdefchar^^cb{\"E} \gdefchar^^cc{\v E} \gdefchar^^cd{\'I} \gdefchar^^ce{\^I} \gdefchar^^cf{\v D} % \gdefchar^^d0{\DH} \gdefchar^^d1{\'N} \gdefchar^^d2{\v N} \gdefchar^^d3{\'O} \gdefchar^^d4{\^O} \gdefchar^^d5{\H O} \gdefchar^^d6{\"O} \gdefchar^^d7{$\times$} \gdefchar^^d8{\v R} \gdefchar^^d9{\ringaccent U} \gdefchar^^da{\'U} \gdefchar^^db{\H U} \gdefchar^^dc{\"U} \gdefchar^^dd{\'Y} \gdefchar^^de{\cedilla T} \gdefchar^^df{\ss} % \gdefchar^^e0{\'r} \gdefchar^^e1{\'a} \gdefchar^^e2{\^a} \gdefchar^^e3{\u a} \gdefchar^^e4{\"a} \gdefchar^^e5{\'l} \gdefchar^^e6{\'c} \gdefchar^^e7{\cedilla c} \gdefchar^^e8{\v c} \gdefchar^^e9{\'e} \gdefchar^^ea{\ogonek{e}} \gdefchar^^eb{\"e} \gdefchar^^ec{\v e} \gdefchar^^ed{\'{\dotless{i}}} \gdefchar^^ee{\^{\dotless{i}}} \gdefchar^^ef{\v d} % \gdefchar^^f0{\dh} \gdefchar^^f1{\'n} \gdefchar^^f2{\v n} \gdefchar^^f3{\'o} \gdefchar^^f4{\^o} \gdefchar^^f5{\H o} \gdefchar^^f6{\"o} \gdefchar^^f7{$\div$} \gdefchar^^f8{\v r} \gdefchar^^f9{\ringaccent u} \gdefchar^^fa{\'u} \gdefchar^^fb{\H u} 
\gdefchar^^fc{\"u} \gdefchar^^fd{\'y} \gdefchar^^fe{\cedilla t} \gdefchar^^ff{\dotaccent{}} } % UTF-8 character definitions. % % This code to support UTF-8 is based on LaTeX's utf8.def, with some % changes for Texinfo conventions. It is included here under the GPL by % permission from Frank Mittelbach and the LaTeX team. % \newcount\countUTFx \newcount\countUTFy \newcount\countUTFz \gdef\UTFviiiTwoOctets#1#2{\expandafter \UTFviiiDefined\csname u8:#1\string #2\endcsname} % \gdef\UTFviiiThreeOctets#1#2#3{\expandafter \UTFviiiDefined\csname u8:#1\string #2\string #3\endcsname} % \gdef\UTFviiiFourOctets#1#2#3#4{\expandafter \UTFviiiDefined\csname u8:#1\string #2\string #3\string #4\endcsname} \gdef\UTFviiiDefined#1{% \ifx #1\relax \message{\linenumber Unicode char \string #1 not defined for Texinfo}% \else \expandafter #1% \fi } % Give non-ASCII bytes the active definitions for processing UTF-8 sequences \begingroup \catcode`\~13 \catcode`\$12 \catcode`\"12 % Loop from \countUTFx to \countUTFy, performing \UTFviiiTmp % substituting ~ and $ with a character token of that value. \def\UTFviiiLoop{% \global\catcode\countUTFx\active \uccode`\~\countUTFx \uccode`\$\countUTFx \uppercase\expandafter{\UTFviiiTmp}% \advance\countUTFx by 1 \ifnum\countUTFx < \countUTFy \expandafter\UTFviiiLoop \fi} % For bytes other than the first in a UTF-8 sequence. Not expected to % be expanded except when writing to auxiliary files. 
\countUTFx = "80 \countUTFy = "C2 \def\UTFviiiTmp{% \gdef~{% \ifpassthroughchars $\fi}}% \UTFviiiLoop \countUTFx = "C2 \countUTFy = "E0 \def\UTFviiiTmp{% \gdef~{% \ifpassthroughchars $% \else\expandafter\UTFviiiTwoOctets\expandafter$\fi}}% \UTFviiiLoop \countUTFx = "E0 \countUTFy = "F0 \def\UTFviiiTmp{% \gdef~{% \ifpassthroughchars $% \else\expandafter\UTFviiiThreeOctets\expandafter$\fi}}% \UTFviiiLoop \countUTFx = "F0 \countUTFy = "F4 \def\UTFviiiTmp{% \gdef~{% \ifpassthroughchars $% \else\expandafter\UTFviiiFourOctets\expandafter$\fi }}% \UTFviiiLoop \endgroup \def\globallet{\global\let} % save some \expandafter's below % @U{xxxx} to produce U+xxxx, if we support it. \def\U#1{% \expandafter\ifx\csname uni:#1\endcsname \relax \iftxinativeunicodecapable % All Unicode characters can be used if native Unicode handling is % active. However, if the font does not have the glyph, % letters are missing. \begingroup \uccode`\.="#1\relax \uppercase{.} \endgroup \else \errhelp = \EMsimple \errmessage{Unicode character U+#1 not supported, sorry}% \fi \else \csname uni:#1\endcsname \fi } % These macros are used here to construct the name of a control % sequence to be defined. \def\UTFviiiTwoOctetsName#1#2{% \csname u8:#1\string #2\endcsname}% \def\UTFviiiThreeOctetsName#1#2#3{% \csname u8:#1\string #2\string #3\endcsname}% \def\UTFviiiFourOctetsName#1#2#3#4{% \csname u8:#1\string #2\string #3\string #4\endcsname}% % For UTF-8 byte sequences (TeX, e-TeX and pdfTeX), % provide a definition macro to replace a Unicode character; % this gets used by the @U command % \begingroup \catcode`\"=12 \catcode`\<=12 \catcode`\.=12 \catcode`\,=12 \catcode`\;=12 \catcode`\!=12 \catcode`\~=13 \gdef\DeclareUnicodeCharacterUTFviii#1#2{% \countUTFz = "#1\relax \begingroup \parseXMLCharref % Give \u8:... its definition. The sequence of seven \expandafter's % expands after the \gdef three times, e.g. % % 1. \UTFviiTwoOctetsName B1 B2 % 2. \csname u8:B1 \string B2 \endcsname % 3. 
\u8: B1 B2 (a single control sequence token) % \expandafter\expandafter \expandafter\expandafter \expandafter\expandafter \expandafter\gdef \UTFviiiTmp{#2}% % \expandafter\ifx\csname uni:#1\endcsname \relax \else \message{Internal error, already defined: #1}% \fi % % define an additional control sequence for this code point. \expandafter\globallet\csname uni:#1\endcsname \UTFviiiTmp \endgroup} % % Given the value in \countUTFz as a Unicode code point, set \UTFviiiTmp % to the corresponding UTF-8 sequence. \gdef\parseXMLCharref{% \ifnum\countUTFz < "A0\relax \errhelp = \EMsimple \errmessage{Cannot define Unicode char value < 00A0}% \else\ifnum\countUTFz < "800\relax \parseUTFviiiA,% \parseUTFviiiB C\UTFviiiTwoOctetsName.,% \else\ifnum\countUTFz < "10000\relax \parseUTFviiiA;% \parseUTFviiiA,% \parseUTFviiiB E\UTFviiiThreeOctetsName.{,;}% \else \parseUTFviiiA;% \parseUTFviiiA,% \parseUTFviiiA!% \parseUTFviiiB F\UTFviiiFourOctetsName.{!,;}% \fi\fi\fi } % Extract a byte from the end of the UTF-8 representation of \countUTFx. % It must be a non-initial byte in the sequence. % Change \uccode of #1 for it to be used in \parseUTFviiiB as one % of the bytes. \gdef\parseUTFviiiA#1{% \countUTFx = \countUTFz \divide\countUTFz by 64 \countUTFy = \countUTFz % Save to be the future value of \countUTFz. \multiply\countUTFz by 64 % \countUTFz is now \countUTFx with the last 5 bits cleared. Subtract % in order to get the last five bits. \advance\countUTFx by -\countUTFz % Convert this to the byte in the UTF-8 sequence. \advance\countUTFx by 128 \uccode `#1\countUTFx \countUTFz = \countUTFy} % Used to put a UTF-8 byte sequence into \UTFviiiTmp % #1 is the increment for \countUTFz to yield a the first byte of the UTF-8 % sequence. % #2 is one of the \UTFviii*OctetsName macros. % #3 is always a full stop (.) % #4 is a template for the other bytes in the sequence. The values for these % bytes is substituted in here with \uppercase using the \uccode's. 
\gdef\parseUTFviiiB#1#2#3#4{% \advance\countUTFz by "#10\relax \uccode `#3\countUTFz \uppercase{\gdef\UTFviiiTmp{#2#3#4}}} \endgroup % For native Unicode handling (XeTeX and LuaTeX), % provide a definition macro that sets a catcode to `other' non-globally % \def\DeclareUnicodeCharacterNativeOther#1#2{% \catcode"#1=\other } % https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_M % U+0000..U+007F = https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block) % U+0080..U+00FF = https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block) % U+0100..U+017F = https://en.wikipedia.org/wiki/Latin_Extended-A % U+0180..U+024F = https://en.wikipedia.org/wiki/Latin_Extended-B % % Many of our renditions are less than wonderful, and all the missing % characters are available somewhere. Loading the necessary fonts % awaits user request. We can't truly support Unicode without % reimplementing everything that's been done in LaTeX for many years, % plus probably using luatex or xetex, and who knows what else. % We won't be doing that here in this simple file. But we can try to at % least make most of the characters not bomb out. 
% \def\unicodechardefs{% \DeclareUnicodeCharacter{00A0}{\tie}% \DeclareUnicodeCharacter{00A1}{\exclamdown}% \DeclareUnicodeCharacter{00A2}{{\tcfont \char162}}% 0242=cent \DeclareUnicodeCharacter{00A3}{\pounds{}}% \DeclareUnicodeCharacter{00A4}{{\tcfont \char164}}% 0244=currency \DeclareUnicodeCharacter{00A5}{{\tcfont \char165}}% 0245=yen \DeclareUnicodeCharacter{00A6}{{\tcfont \char166}}% 0246=brokenbar \DeclareUnicodeCharacter{00A7}{\S}% \DeclareUnicodeCharacter{00A8}{\"{ }}% \DeclareUnicodeCharacter{00A9}{\copyright{}}% \DeclareUnicodeCharacter{00AA}{\ordf}% \DeclareUnicodeCharacter{00AB}{\guillemetleft{}}% \DeclareUnicodeCharacter{00AC}{\ensuremath\lnot}% \DeclareUnicodeCharacter{00AD}{\-}% \DeclareUnicodeCharacter{00AE}{\registeredsymbol{}}% \DeclareUnicodeCharacter{00AF}{\={ }}% % \DeclareUnicodeCharacter{00B0}{\ringaccent{ }}% \DeclareUnicodeCharacter{00B1}{\ensuremath\pm}% \DeclareUnicodeCharacter{00B2}{$^2$}% \DeclareUnicodeCharacter{00B3}{$^3$}% \DeclareUnicodeCharacter{00B4}{\'{ }}% \DeclareUnicodeCharacter{00B5}{$\mu$}% \DeclareUnicodeCharacter{00B6}{\P}% \DeclareUnicodeCharacter{00B7}{\ensuremath\cdot}% \DeclareUnicodeCharacter{00B8}{\cedilla{ }}% \DeclareUnicodeCharacter{00B9}{$^1$}% \DeclareUnicodeCharacter{00BA}{\ordm}% \DeclareUnicodeCharacter{00BB}{\guillemetright{}}% \DeclareUnicodeCharacter{00BC}{$1\over4$}% \DeclareUnicodeCharacter{00BD}{$1\over2$}% \DeclareUnicodeCharacter{00BE}{$3\over4$}% \DeclareUnicodeCharacter{00BF}{\questiondown}% % \DeclareUnicodeCharacter{00C0}{\`A}% \DeclareUnicodeCharacter{00C1}{\'A}% \DeclareUnicodeCharacter{00C2}{\^A}% \DeclareUnicodeCharacter{00C3}{\~A}% \DeclareUnicodeCharacter{00C4}{\"A}% \DeclareUnicodeCharacter{00C5}{\AA}% \DeclareUnicodeCharacter{00C6}{\AE}% \DeclareUnicodeCharacter{00C7}{\cedilla{C}}% \DeclareUnicodeCharacter{00C8}{\`E}% \DeclareUnicodeCharacter{00C9}{\'E}% \DeclareUnicodeCharacter{00CA}{\^E}% \DeclareUnicodeCharacter{00CB}{\"E}% \DeclareUnicodeCharacter{00CC}{\`I}% 
\DeclareUnicodeCharacter{00CD}{\'I}% \DeclareUnicodeCharacter{00CE}{\^I}% \DeclareUnicodeCharacter{00CF}{\"I}% % \DeclareUnicodeCharacter{00D0}{\DH}% \DeclareUnicodeCharacter{00D1}{\~N}% \DeclareUnicodeCharacter{00D2}{\`O}% \DeclareUnicodeCharacter{00D3}{\'O}% \DeclareUnicodeCharacter{00D4}{\^O}% \DeclareUnicodeCharacter{00D5}{\~O}% \DeclareUnicodeCharacter{00D6}{\"O}% \DeclareUnicodeCharacter{00D7}{\ensuremath\times}% \DeclareUnicodeCharacter{00D8}{\O}% \DeclareUnicodeCharacter{00D9}{\`U}% \DeclareUnicodeCharacter{00DA}{\'U}% \DeclareUnicodeCharacter{00DB}{\^U}% \DeclareUnicodeCharacter{00DC}{\"U}% \DeclareUnicodeCharacter{00DD}{\'Y}% \DeclareUnicodeCharacter{00DE}{\TH}% \DeclareUnicodeCharacter{00DF}{\ss}% % \DeclareUnicodeCharacter{00E0}{\`a}% \DeclareUnicodeCharacter{00E1}{\'a}% \DeclareUnicodeCharacter{00E2}{\^a}% \DeclareUnicodeCharacter{00E3}{\~a}% \DeclareUnicodeCharacter{00E4}{\"a}% \DeclareUnicodeCharacter{00E5}{\aa}% \DeclareUnicodeCharacter{00E6}{\ae}% \DeclareUnicodeCharacter{00E7}{\cedilla{c}}% \DeclareUnicodeCharacter{00E8}{\`e}% \DeclareUnicodeCharacter{00E9}{\'e}% \DeclareUnicodeCharacter{00EA}{\^e}% \DeclareUnicodeCharacter{00EB}{\"e}% \DeclareUnicodeCharacter{00EC}{\`{\dotless{i}}}% \DeclareUnicodeCharacter{00ED}{\'{\dotless{i}}}% \DeclareUnicodeCharacter{00EE}{\^{\dotless{i}}}% \DeclareUnicodeCharacter{00EF}{\"{\dotless{i}}}% % \DeclareUnicodeCharacter{00F0}{\dh}% \DeclareUnicodeCharacter{00F1}{\~n}% \DeclareUnicodeCharacter{00F2}{\`o}% \DeclareUnicodeCharacter{00F3}{\'o}% \DeclareUnicodeCharacter{00F4}{\^o}% \DeclareUnicodeCharacter{00F5}{\~o}% \DeclareUnicodeCharacter{00F6}{\"o}% \DeclareUnicodeCharacter{00F7}{\ensuremath\div}% \DeclareUnicodeCharacter{00F8}{\o}% \DeclareUnicodeCharacter{00F9}{\`u}% \DeclareUnicodeCharacter{00FA}{\'u}% \DeclareUnicodeCharacter{00FB}{\^u}% \DeclareUnicodeCharacter{00FC}{\"u}% \DeclareUnicodeCharacter{00FD}{\'y}% \DeclareUnicodeCharacter{00FE}{\th}% \DeclareUnicodeCharacter{00FF}{\"y}% % 
\DeclareUnicodeCharacter{0100}{\=A}% \DeclareUnicodeCharacter{0101}{\=a}% \DeclareUnicodeCharacter{0102}{\u{A}}% \DeclareUnicodeCharacter{0103}{\u{a}}% \DeclareUnicodeCharacter{0104}{\ogonek{A}}% \DeclareUnicodeCharacter{0105}{\ogonek{a}}% \DeclareUnicodeCharacter{0106}{\'C}% \DeclareUnicodeCharacter{0107}{\'c}% \DeclareUnicodeCharacter{0108}{\^C}% \DeclareUnicodeCharacter{0109}{\^c}% \DeclareUnicodeCharacter{010A}{\dotaccent{C}}% \DeclareUnicodeCharacter{010B}{\dotaccent{c}}% \DeclareUnicodeCharacter{010C}{\v{C}}% \DeclareUnicodeCharacter{010D}{\v{c}}% \DeclareUnicodeCharacter{010E}{\v{D}}% \DeclareUnicodeCharacter{010F}{d'}% % \DeclareUnicodeCharacter{0110}{\DH}% \DeclareUnicodeCharacter{0111}{\dh}% \DeclareUnicodeCharacter{0112}{\=E}% \DeclareUnicodeCharacter{0113}{\=e}% \DeclareUnicodeCharacter{0114}{\u{E}}% \DeclareUnicodeCharacter{0115}{\u{e}}% \DeclareUnicodeCharacter{0116}{\dotaccent{E}}% \DeclareUnicodeCharacter{0117}{\dotaccent{e}}% \DeclareUnicodeCharacter{0118}{\ogonek{E}}% \DeclareUnicodeCharacter{0119}{\ogonek{e}}% \DeclareUnicodeCharacter{011A}{\v{E}}% \DeclareUnicodeCharacter{011B}{\v{e}}% \DeclareUnicodeCharacter{011C}{\^G}% \DeclareUnicodeCharacter{011D}{\^g}% \DeclareUnicodeCharacter{011E}{\u{G}}% \DeclareUnicodeCharacter{011F}{\u{g}}% % \DeclareUnicodeCharacter{0120}{\dotaccent{G}}% \DeclareUnicodeCharacter{0121}{\dotaccent{g}}% \DeclareUnicodeCharacter{0122}{\cedilla{G}}% \DeclareUnicodeCharacter{0123}{\cedilla{g}}% \DeclareUnicodeCharacter{0124}{\^H}% \DeclareUnicodeCharacter{0125}{\^h}% \DeclareUnicodeCharacter{0126}{\missingcharmsg{H WITH STROKE}}% \DeclareUnicodeCharacter{0127}{\missingcharmsg{h WITH STROKE}}% \DeclareUnicodeCharacter{0128}{\~I}% \DeclareUnicodeCharacter{0129}{\~{\dotless{i}}}% \DeclareUnicodeCharacter{012A}{\=I}% \DeclareUnicodeCharacter{012B}{\={\dotless{i}}}% \DeclareUnicodeCharacter{012C}{\u{I}}% \DeclareUnicodeCharacter{012D}{\u{\dotless{i}}}% \DeclareUnicodeCharacter{012E}{\ogonek{I}}% 
\DeclareUnicodeCharacter{012F}{\ogonek{i}}% % \DeclareUnicodeCharacter{0130}{\dotaccent{I}}% \DeclareUnicodeCharacter{0131}{\dotless{i}}% \DeclareUnicodeCharacter{0132}{IJ}% \DeclareUnicodeCharacter{0133}{ij}% \DeclareUnicodeCharacter{0134}{\^J}% \DeclareUnicodeCharacter{0135}{\^{\dotless{j}}}% \DeclareUnicodeCharacter{0136}{\cedilla{K}}% \DeclareUnicodeCharacter{0137}{\cedilla{k}}% \DeclareUnicodeCharacter{0138}{\ensuremath\kappa}% \DeclareUnicodeCharacter{0139}{\'L}% \DeclareUnicodeCharacter{013A}{\'l}% \DeclareUnicodeCharacter{013B}{\cedilla{L}}% \DeclareUnicodeCharacter{013C}{\cedilla{l}}% \DeclareUnicodeCharacter{013D}{L'}% should kern \DeclareUnicodeCharacter{013E}{l'}% should kern \DeclareUnicodeCharacter{013F}{L\U{00B7}}% % \DeclareUnicodeCharacter{0140}{l\U{00B7}}% \DeclareUnicodeCharacter{0141}{\L}% \DeclareUnicodeCharacter{0142}{\l}% \DeclareUnicodeCharacter{0143}{\'N}% \DeclareUnicodeCharacter{0144}{\'n}% \DeclareUnicodeCharacter{0145}{\cedilla{N}}% \DeclareUnicodeCharacter{0146}{\cedilla{n}}% \DeclareUnicodeCharacter{0147}{\v{N}}% \DeclareUnicodeCharacter{0148}{\v{n}}% \DeclareUnicodeCharacter{0149}{'n}% \DeclareUnicodeCharacter{014A}{\missingcharmsg{ENG}}% \DeclareUnicodeCharacter{014B}{\missingcharmsg{eng}}% \DeclareUnicodeCharacter{014C}{\=O}% \DeclareUnicodeCharacter{014D}{\=o}% \DeclareUnicodeCharacter{014E}{\u{O}}% \DeclareUnicodeCharacter{014F}{\u{o}}% % \DeclareUnicodeCharacter{0150}{\H{O}}% \DeclareUnicodeCharacter{0151}{\H{o}}% \DeclareUnicodeCharacter{0152}{\OE}% \DeclareUnicodeCharacter{0153}{\oe}% \DeclareUnicodeCharacter{0154}{\'R}% \DeclareUnicodeCharacter{0155}{\'r}% \DeclareUnicodeCharacter{0156}{\cedilla{R}}% \DeclareUnicodeCharacter{0157}{\cedilla{r}}% \DeclareUnicodeCharacter{0158}{\v{R}}% \DeclareUnicodeCharacter{0159}{\v{r}}% \DeclareUnicodeCharacter{015A}{\'S}% \DeclareUnicodeCharacter{015B}{\'s}% \DeclareUnicodeCharacter{015C}{\^S}% \DeclareUnicodeCharacter{015D}{\^s}% \DeclareUnicodeCharacter{015E}{\cedilla{S}}% 
\DeclareUnicodeCharacter{015F}{\cedilla{s}}% % \DeclareUnicodeCharacter{0160}{\v{S}}% \DeclareUnicodeCharacter{0161}{\v{s}}% \DeclareUnicodeCharacter{0162}{\cedilla{T}}% \DeclareUnicodeCharacter{0163}{\cedilla{t}}% \DeclareUnicodeCharacter{0164}{\v{T}}% \DeclareUnicodeCharacter{0165}{\v{t}}% \DeclareUnicodeCharacter{0166}{\missingcharmsg{H WITH STROKE}}% \DeclareUnicodeCharacter{0167}{\missingcharmsg{h WITH STROKE}}% \DeclareUnicodeCharacter{0168}{\~U}% \DeclareUnicodeCharacter{0169}{\~u}% \DeclareUnicodeCharacter{016A}{\=U}% \DeclareUnicodeCharacter{016B}{\=u}% \DeclareUnicodeCharacter{016C}{\u{U}}% \DeclareUnicodeCharacter{016D}{\u{u}}% \DeclareUnicodeCharacter{016E}{\ringaccent{U}}% \DeclareUnicodeCharacter{016F}{\ringaccent{u}}% % \DeclareUnicodeCharacter{0170}{\H{U}}% \DeclareUnicodeCharacter{0171}{\H{u}}% \DeclareUnicodeCharacter{0172}{\ogonek{U}}% \DeclareUnicodeCharacter{0173}{\ogonek{u}}% \DeclareUnicodeCharacter{0174}{\^W}% \DeclareUnicodeCharacter{0175}{\^w}% \DeclareUnicodeCharacter{0176}{\^Y}% \DeclareUnicodeCharacter{0177}{\^y}% \DeclareUnicodeCharacter{0178}{\"Y}% \DeclareUnicodeCharacter{0179}{\'Z}% \DeclareUnicodeCharacter{017A}{\'z}% \DeclareUnicodeCharacter{017B}{\dotaccent{Z}}% \DeclareUnicodeCharacter{017C}{\dotaccent{z}}% \DeclareUnicodeCharacter{017D}{\v{Z}}% \DeclareUnicodeCharacter{017E}{\v{z}}% \DeclareUnicodeCharacter{017F}{\missingcharmsg{LONG S}}% % \DeclareUnicodeCharacter{01C4}{D\v{Z}}% \DeclareUnicodeCharacter{01C5}{D\v{z}}% \DeclareUnicodeCharacter{01C6}{d\v{z}}% \DeclareUnicodeCharacter{01C7}{LJ}% \DeclareUnicodeCharacter{01C8}{Lj}% \DeclareUnicodeCharacter{01C9}{lj}% \DeclareUnicodeCharacter{01CA}{NJ}% \DeclareUnicodeCharacter{01CB}{Nj}% \DeclareUnicodeCharacter{01CC}{nj}% \DeclareUnicodeCharacter{01CD}{\v{A}}% \DeclareUnicodeCharacter{01CE}{\v{a}}% \DeclareUnicodeCharacter{01CF}{\v{I}}% % \DeclareUnicodeCharacter{01D0}{\v{\dotless{i}}}% \DeclareUnicodeCharacter{01D1}{\v{O}}% \DeclareUnicodeCharacter{01D2}{\v{o}}% 
\DeclareUnicodeCharacter{01D3}{\v{U}}% \DeclareUnicodeCharacter{01D4}{\v{u}}% % \DeclareUnicodeCharacter{01E2}{\={\AE}}% \DeclareUnicodeCharacter{01E3}{\={\ae}}% \DeclareUnicodeCharacter{01E6}{\v{G}}% \DeclareUnicodeCharacter{01E7}{\v{g}}% \DeclareUnicodeCharacter{01E8}{\v{K}}% \DeclareUnicodeCharacter{01E9}{\v{k}}% % \DeclareUnicodeCharacter{01F0}{\v{\dotless{j}}}% \DeclareUnicodeCharacter{01F1}{DZ}% \DeclareUnicodeCharacter{01F2}{Dz}% \DeclareUnicodeCharacter{01F3}{dz}% \DeclareUnicodeCharacter{01F4}{\'G}% \DeclareUnicodeCharacter{01F5}{\'g}% \DeclareUnicodeCharacter{01F8}{\`N}% \DeclareUnicodeCharacter{01F9}{\`n}% \DeclareUnicodeCharacter{01FC}{\'{\AE}}% \DeclareUnicodeCharacter{01FD}{\'{\ae}}% \DeclareUnicodeCharacter{01FE}{\'{\O}}% \DeclareUnicodeCharacter{01FF}{\'{\o}}% % \DeclareUnicodeCharacter{021E}{\v{H}}% \DeclareUnicodeCharacter{021F}{\v{h}}% % \DeclareUnicodeCharacter{0226}{\dotaccent{A}}% \DeclareUnicodeCharacter{0227}{\dotaccent{a}}% \DeclareUnicodeCharacter{0228}{\cedilla{E}}% \DeclareUnicodeCharacter{0229}{\cedilla{e}}% \DeclareUnicodeCharacter{022E}{\dotaccent{O}}% \DeclareUnicodeCharacter{022F}{\dotaccent{o}}% % \DeclareUnicodeCharacter{0232}{\=Y}% \DeclareUnicodeCharacter{0233}{\=y}% \DeclareUnicodeCharacter{0237}{\dotless{j}}% % \DeclareUnicodeCharacter{02BC}{'}% % \DeclareUnicodeCharacter{02DB}{\ogonek{ }}% % % Greek letters upper case \DeclareUnicodeCharacter{0391}{{\it A}}% \DeclareUnicodeCharacter{0392}{{\it B}}% \DeclareUnicodeCharacter{0393}{\ensuremath{\mit\Gamma}}% \DeclareUnicodeCharacter{0394}{\ensuremath{\mit\Delta}}% \DeclareUnicodeCharacter{0395}{{\it E}}% \DeclareUnicodeCharacter{0396}{{\it Z}}% \DeclareUnicodeCharacter{0397}{{\it H}}% \DeclareUnicodeCharacter{0398}{\ensuremath{\mit\Theta}}% \DeclareUnicodeCharacter{0399}{{\it I}}% \DeclareUnicodeCharacter{039A}{{\it K}}% \DeclareUnicodeCharacter{039B}{\ensuremath{\mit\Lambda}}% \DeclareUnicodeCharacter{039C}{{\it M}}% \DeclareUnicodeCharacter{039D}{{\it N}}% 
\DeclareUnicodeCharacter{039E}{\ensuremath{\mit\Xi}}% \DeclareUnicodeCharacter{039F}{{\it O}}% \DeclareUnicodeCharacter{03A0}{\ensuremath{\mit\Pi}}% \DeclareUnicodeCharacter{03A1}{{\it P}}% %\DeclareUnicodeCharacter{03A2}{} % none - corresponds to final sigma \DeclareUnicodeCharacter{03A3}{\ensuremath{\mit\Sigma}}% \DeclareUnicodeCharacter{03A4}{{\it T}}% \DeclareUnicodeCharacter{03A5}{\ensuremath{\mit\Upsilon}}% \DeclareUnicodeCharacter{03A6}{\ensuremath{\mit\Phi}}% \DeclareUnicodeCharacter{03A7}{{\it X}}% \DeclareUnicodeCharacter{03A8}{\ensuremath{\mit\Psi}}% \DeclareUnicodeCharacter{03A9}{\ensuremath{\mit\Omega}}% % % Vowels with accents \DeclareUnicodeCharacter{0390}{\ensuremath{\ddot{\acute\iota}}}% \DeclareUnicodeCharacter{03AC}{\ensuremath{\acute\alpha}}% \DeclareUnicodeCharacter{03AD}{\ensuremath{\acute\epsilon}}% \DeclareUnicodeCharacter{03AE}{\ensuremath{\acute\eta}}% \DeclareUnicodeCharacter{03AF}{\ensuremath{\acute\iota}}% \DeclareUnicodeCharacter{03B0}{\ensuremath{\acute{\ddot\upsilon}}}% % % Standalone accent \DeclareUnicodeCharacter{0384}{\ensuremath{\acute{\ }}}% % % Greek letters lower case \DeclareUnicodeCharacter{03B1}{\ensuremath\alpha}% \DeclareUnicodeCharacter{03B2}{\ensuremath\beta}% \DeclareUnicodeCharacter{03B3}{\ensuremath\gamma}% \DeclareUnicodeCharacter{03B4}{\ensuremath\delta}% \DeclareUnicodeCharacter{03B5}{\ensuremath\epsilon}% \DeclareUnicodeCharacter{03B6}{\ensuremath\zeta}% \DeclareUnicodeCharacter{03B7}{\ensuremath\eta}% \DeclareUnicodeCharacter{03B8}{\ensuremath\theta}% \DeclareUnicodeCharacter{03B9}{\ensuremath\iota}% \DeclareUnicodeCharacter{03BA}{\ensuremath\kappa}% \DeclareUnicodeCharacter{03BB}{\ensuremath\lambda}% \DeclareUnicodeCharacter{03BC}{\ensuremath\mu}% \DeclareUnicodeCharacter{03BD}{\ensuremath\nu}% \DeclareUnicodeCharacter{03BE}{\ensuremath\xi}% \DeclareUnicodeCharacter{03BF}{{\it o}}% omicron \DeclareUnicodeCharacter{03C0}{\ensuremath\pi}% \DeclareUnicodeCharacter{03C1}{\ensuremath\rho}% 
\DeclareUnicodeCharacter{03C2}{\ensuremath\varsigma}% \DeclareUnicodeCharacter{03C3}{\ensuremath\sigma}% \DeclareUnicodeCharacter{03C4}{\ensuremath\tau}% \DeclareUnicodeCharacter{03C5}{\ensuremath\upsilon}% \DeclareUnicodeCharacter{03C6}{\ensuremath\phi}% \DeclareUnicodeCharacter{03C7}{\ensuremath\chi}% \DeclareUnicodeCharacter{03C8}{\ensuremath\psi}% \DeclareUnicodeCharacter{03C9}{\ensuremath\omega}% % % More Greek vowels with accents \DeclareUnicodeCharacter{03CA}{\ensuremath{\ddot\iota}}% \DeclareUnicodeCharacter{03CB}{\ensuremath{\ddot\upsilon}}% \DeclareUnicodeCharacter{03CC}{\ensuremath{\acute o}}% \DeclareUnicodeCharacter{03CD}{\ensuremath{\acute\upsilon}}% \DeclareUnicodeCharacter{03CE}{\ensuremath{\acute\omega}}% % % Variant Greek letters \DeclareUnicodeCharacter{03D1}{\ensuremath\vartheta}% \DeclareUnicodeCharacter{03D6}{\ensuremath\varpi}% \DeclareUnicodeCharacter{03F1}{\ensuremath\varrho}% % \DeclareUnicodeCharacter{1E02}{\dotaccent{B}}% \DeclareUnicodeCharacter{1E03}{\dotaccent{b}}% \DeclareUnicodeCharacter{1E04}{\udotaccent{B}}% \DeclareUnicodeCharacter{1E05}{\udotaccent{b}}% \DeclareUnicodeCharacter{1E06}{\ubaraccent{B}}% \DeclareUnicodeCharacter{1E07}{\ubaraccent{b}}% \DeclareUnicodeCharacter{1E0A}{\dotaccent{D}}% \DeclareUnicodeCharacter{1E0B}{\dotaccent{d}}% \DeclareUnicodeCharacter{1E0C}{\udotaccent{D}}% \DeclareUnicodeCharacter{1E0D}{\udotaccent{d}}% \DeclareUnicodeCharacter{1E0E}{\ubaraccent{D}}% \DeclareUnicodeCharacter{1E0F}{\ubaraccent{d}}% % \DeclareUnicodeCharacter{1E1E}{\dotaccent{F}}% \DeclareUnicodeCharacter{1E1F}{\dotaccent{f}}% % \DeclareUnicodeCharacter{1E20}{\=G}% \DeclareUnicodeCharacter{1E21}{\=g}% \DeclareUnicodeCharacter{1E22}{\dotaccent{H}}% \DeclareUnicodeCharacter{1E23}{\dotaccent{h}}% \DeclareUnicodeCharacter{1E24}{\udotaccent{H}}% \DeclareUnicodeCharacter{1E25}{\udotaccent{h}}% \DeclareUnicodeCharacter{1E26}{\"H}% \DeclareUnicodeCharacter{1E27}{\"h}% % \DeclareUnicodeCharacter{1E30}{\'K}% 
\DeclareUnicodeCharacter{1E31}{\'k}% \DeclareUnicodeCharacter{1E32}{\udotaccent{K}}% \DeclareUnicodeCharacter{1E33}{\udotaccent{k}}% \DeclareUnicodeCharacter{1E34}{\ubaraccent{K}}% \DeclareUnicodeCharacter{1E35}{\ubaraccent{k}}% \DeclareUnicodeCharacter{1E36}{\udotaccent{L}}% \DeclareUnicodeCharacter{1E37}{\udotaccent{l}}% \DeclareUnicodeCharacter{1E3A}{\ubaraccent{L}}% \DeclareUnicodeCharacter{1E3B}{\ubaraccent{l}}% \DeclareUnicodeCharacter{1E3E}{\'M}% \DeclareUnicodeCharacter{1E3F}{\'m}% % \DeclareUnicodeCharacter{1E40}{\dotaccent{M}}% \DeclareUnicodeCharacter{1E41}{\dotaccent{m}}% \DeclareUnicodeCharacter{1E42}{\udotaccent{M}}% \DeclareUnicodeCharacter{1E43}{\udotaccent{m}}% \DeclareUnicodeCharacter{1E44}{\dotaccent{N}}% \DeclareUnicodeCharacter{1E45}{\dotaccent{n}}% \DeclareUnicodeCharacter{1E46}{\udotaccent{N}}% \DeclareUnicodeCharacter{1E47}{\udotaccent{n}}% \DeclareUnicodeCharacter{1E48}{\ubaraccent{N}}% \DeclareUnicodeCharacter{1E49}{\ubaraccent{n}}% % \DeclareUnicodeCharacter{1E54}{\'P}% \DeclareUnicodeCharacter{1E55}{\'p}% \DeclareUnicodeCharacter{1E56}{\dotaccent{P}}% \DeclareUnicodeCharacter{1E57}{\dotaccent{p}}% \DeclareUnicodeCharacter{1E58}{\dotaccent{R}}% \DeclareUnicodeCharacter{1E59}{\dotaccent{r}}% \DeclareUnicodeCharacter{1E5A}{\udotaccent{R}}% \DeclareUnicodeCharacter{1E5B}{\udotaccent{r}}% \DeclareUnicodeCharacter{1E5E}{\ubaraccent{R}}% \DeclareUnicodeCharacter{1E5F}{\ubaraccent{r}}% % \DeclareUnicodeCharacter{1E60}{\dotaccent{S}}% \DeclareUnicodeCharacter{1E61}{\dotaccent{s}}% \DeclareUnicodeCharacter{1E62}{\udotaccent{S}}% \DeclareUnicodeCharacter{1E63}{\udotaccent{s}}% \DeclareUnicodeCharacter{1E6A}{\dotaccent{T}}% \DeclareUnicodeCharacter{1E6B}{\dotaccent{t}}% \DeclareUnicodeCharacter{1E6C}{\udotaccent{T}}% \DeclareUnicodeCharacter{1E6D}{\udotaccent{t}}% \DeclareUnicodeCharacter{1E6E}{\ubaraccent{T}}% \DeclareUnicodeCharacter{1E6F}{\ubaraccent{t}}% % \DeclareUnicodeCharacter{1E7C}{\~V}% \DeclareUnicodeCharacter{1E7D}{\~v}% 
\DeclareUnicodeCharacter{1E7E}{\udotaccent{V}}% \DeclareUnicodeCharacter{1E7F}{\udotaccent{v}}% % \DeclareUnicodeCharacter{1E80}{\`W}% \DeclareUnicodeCharacter{1E81}{\`w}% \DeclareUnicodeCharacter{1E82}{\'W}% \DeclareUnicodeCharacter{1E83}{\'w}% \DeclareUnicodeCharacter{1E84}{\"W}% \DeclareUnicodeCharacter{1E85}{\"w}% \DeclareUnicodeCharacter{1E86}{\dotaccent{W}}% \DeclareUnicodeCharacter{1E87}{\dotaccent{w}}% \DeclareUnicodeCharacter{1E88}{\udotaccent{W}}% \DeclareUnicodeCharacter{1E89}{\udotaccent{w}}% \DeclareUnicodeCharacter{1E8A}{\dotaccent{X}}% \DeclareUnicodeCharacter{1E8B}{\dotaccent{x}}% \DeclareUnicodeCharacter{1E8C}{\"X}% \DeclareUnicodeCharacter{1E8D}{\"x}% \DeclareUnicodeCharacter{1E8E}{\dotaccent{Y}}% \DeclareUnicodeCharacter{1E8F}{\dotaccent{y}}% % \DeclareUnicodeCharacter{1E90}{\^Z}% \DeclareUnicodeCharacter{1E91}{\^z}% \DeclareUnicodeCharacter{1E92}{\udotaccent{Z}}% \DeclareUnicodeCharacter{1E93}{\udotaccent{z}}% \DeclareUnicodeCharacter{1E94}{\ubaraccent{Z}}% \DeclareUnicodeCharacter{1E95}{\ubaraccent{z}}% \DeclareUnicodeCharacter{1E96}{\ubaraccent{h}}% \DeclareUnicodeCharacter{1E97}{\"t}% \DeclareUnicodeCharacter{1E98}{\ringaccent{w}}% \DeclareUnicodeCharacter{1E99}{\ringaccent{y}}% % \DeclareUnicodeCharacter{1EA0}{\udotaccent{A}}% \DeclareUnicodeCharacter{1EA1}{\udotaccent{a}}% % \DeclareUnicodeCharacter{1EB8}{\udotaccent{E}}% \DeclareUnicodeCharacter{1EB9}{\udotaccent{e}}% \DeclareUnicodeCharacter{1EBC}{\~E}% \DeclareUnicodeCharacter{1EBD}{\~e}% % \DeclareUnicodeCharacter{1ECA}{\udotaccent{I}}% \DeclareUnicodeCharacter{1ECB}{\udotaccent{i}}% \DeclareUnicodeCharacter{1ECC}{\udotaccent{O}}% \DeclareUnicodeCharacter{1ECD}{\udotaccent{o}}% % \DeclareUnicodeCharacter{1EE4}{\udotaccent{U}}% \DeclareUnicodeCharacter{1EE5}{\udotaccent{u}}% % \DeclareUnicodeCharacter{1EF2}{\`Y}% \DeclareUnicodeCharacter{1EF3}{\`y}% \DeclareUnicodeCharacter{1EF4}{\udotaccent{Y}}% % \DeclareUnicodeCharacter{1EF8}{\~Y}% \DeclareUnicodeCharacter{1EF9}{\~y}% % % Punctuation 
\DeclareUnicodeCharacter{2013}{--}% \DeclareUnicodeCharacter{2014}{---}% \DeclareUnicodeCharacter{2018}{\quoteleft{}}% \DeclareUnicodeCharacter{2019}{\quoteright{}}% \DeclareUnicodeCharacter{201A}{\quotesinglbase{}}% \DeclareUnicodeCharacter{201C}{\quotedblleft{}}% \DeclareUnicodeCharacter{201D}{\quotedblright{}}% \DeclareUnicodeCharacter{201E}{\quotedblbase{}}% \DeclareUnicodeCharacter{2020}{\ensuremath\dagger}% \DeclareUnicodeCharacter{2021}{\ensuremath\ddagger}% \DeclareUnicodeCharacter{2022}{\bullet{}}% \DeclareUnicodeCharacter{202F}{\thinspace}% \DeclareUnicodeCharacter{2026}{\dots{}}% \DeclareUnicodeCharacter{2039}{\guilsinglleft{}}% \DeclareUnicodeCharacter{203A}{\guilsinglright{}}% % \DeclareUnicodeCharacter{20AC}{\euro{}}% % \DeclareUnicodeCharacter{2192}{\expansion{}}% \DeclareUnicodeCharacter{21D2}{\result{}}% % % Mathematical symbols \DeclareUnicodeCharacter{2200}{\ensuremath\forall}% \DeclareUnicodeCharacter{2203}{\ensuremath\exists}% \DeclareUnicodeCharacter{2208}{\ensuremath\in}% \DeclareUnicodeCharacter{2212}{\minus{}}% \DeclareUnicodeCharacter{2217}{\ast}% \DeclareUnicodeCharacter{221E}{\ensuremath\infty}% \DeclareUnicodeCharacter{2225}{\ensuremath\parallel}% \DeclareUnicodeCharacter{2227}{\ensuremath\wedge}% \DeclareUnicodeCharacter{2229}{\ensuremath\cap}% \DeclareUnicodeCharacter{2261}{\equiv{}}% \DeclareUnicodeCharacter{2264}{\ensuremath\leq}% \DeclareUnicodeCharacter{2265}{\ensuremath\geq}% \DeclareUnicodeCharacter{2282}{\ensuremath\subset}% \DeclareUnicodeCharacter{2287}{\ensuremath\supseteq}% % \DeclareUnicodeCharacter{2016}{\ensuremath\Vert}% \DeclareUnicodeCharacter{2032}{\ensuremath\prime}% \DeclareUnicodeCharacter{210F}{\ensuremath\hbar}% \DeclareUnicodeCharacter{2111}{\ensuremath\Im}% \DeclareUnicodeCharacter{2113}{\ensuremath\ell}% \DeclareUnicodeCharacter{2118}{\ensuremath\wp}% \DeclareUnicodeCharacter{211C}{\ensuremath\Re}% \DeclareUnicodeCharacter{2135}{\ensuremath\aleph}% \DeclareUnicodeCharacter{2190}{\ensuremath\leftarrow}% 
\DeclareUnicodeCharacter{2191}{\ensuremath\uparrow}% \DeclareUnicodeCharacter{2193}{\ensuremath\downarrow}% \DeclareUnicodeCharacter{2194}{\ensuremath\leftrightarrow}% \DeclareUnicodeCharacter{2195}{\ensuremath\updownarrow}% \DeclareUnicodeCharacter{2196}{\ensuremath\nwarrow}% \DeclareUnicodeCharacter{2197}{\ensuremath\nearrow}% \DeclareUnicodeCharacter{2198}{\ensuremath\searrow}% \DeclareUnicodeCharacter{2199}{\ensuremath\swarrow}% \DeclareUnicodeCharacter{21A6}{\ensuremath\mapsto}% \DeclareUnicodeCharacter{21A9}{\ensuremath\hookleftarrow}% \DeclareUnicodeCharacter{21AA}{\ensuremath\hookrightarrow}% \DeclareUnicodeCharacter{21BC}{\ensuremath\leftharpoonup}% \DeclareUnicodeCharacter{21BD}{\ensuremath\leftharpoondown}% \DeclareUnicodeCharacter{21C0}{\ensuremath\rightharpoonup}% \DeclareUnicodeCharacter{21C1}{\ensuremath\rightharpoondown}% \DeclareUnicodeCharacter{21CC}{\ensuremath\rightleftharpoons}% \DeclareUnicodeCharacter{21D0}{\ensuremath\Leftarrow}% \DeclareUnicodeCharacter{21D1}{\ensuremath\Uparrow}% \DeclareUnicodeCharacter{21D3}{\ensuremath\Downarrow}% \DeclareUnicodeCharacter{21D4}{\ensuremath\Leftrightarrow}% \DeclareUnicodeCharacter{21D5}{\ensuremath\Updownarrow}% \DeclareUnicodeCharacter{2202}{\ensuremath\partial}% \DeclareUnicodeCharacter{2205}{\ensuremath\emptyset}% \DeclareUnicodeCharacter{2207}{\ensuremath\nabla}% \DeclareUnicodeCharacter{2209}{\ensuremath\notin}% \DeclareUnicodeCharacter{220B}{\ensuremath\owns}% \DeclareUnicodeCharacter{220F}{\ensuremath\prod}% \DeclareUnicodeCharacter{2210}{\ensuremath\coprod}% \DeclareUnicodeCharacter{2211}{\ensuremath\sum}% \DeclareUnicodeCharacter{2213}{\ensuremath\mp}% \DeclareUnicodeCharacter{2218}{\ensuremath\circ}% \DeclareUnicodeCharacter{221A}{\ensuremath\surd}% \DeclareUnicodeCharacter{221D}{\ensuremath\propto}% \DeclareUnicodeCharacter{2220}{\ensuremath\angle}% \DeclareUnicodeCharacter{2223}{\ensuremath\mid}% \DeclareUnicodeCharacter{2228}{\ensuremath\vee}% 
\DeclareUnicodeCharacter{222A}{\ensuremath\cup}% \DeclareUnicodeCharacter{222B}{\ensuremath\smallint}% \DeclareUnicodeCharacter{222E}{\ensuremath\oint}% \DeclareUnicodeCharacter{223C}{\ensuremath\sim}% \DeclareUnicodeCharacter{2240}{\ensuremath\wr}% \DeclareUnicodeCharacter{2243}{\ensuremath\simeq}% \DeclareUnicodeCharacter{2245}{\ensuremath\cong}% \DeclareUnicodeCharacter{2248}{\ensuremath\approx}% \DeclareUnicodeCharacter{224D}{\ensuremath\asymp}% \DeclareUnicodeCharacter{2250}{\ensuremath\doteq}% \DeclareUnicodeCharacter{2260}{\ensuremath\neq}% \DeclareUnicodeCharacter{226A}{\ensuremath\ll}% \DeclareUnicodeCharacter{226B}{\ensuremath\gg}% \DeclareUnicodeCharacter{227A}{\ensuremath\prec}% \DeclareUnicodeCharacter{227B}{\ensuremath\succ}% \DeclareUnicodeCharacter{2283}{\ensuremath\supset}% \DeclareUnicodeCharacter{2286}{\ensuremath\subseteq}% \DeclareUnicodeCharacter{228E}{\ensuremath\uplus}% \DeclareUnicodeCharacter{2291}{\ensuremath\sqsubseteq}% \DeclareUnicodeCharacter{2292}{\ensuremath\sqsupseteq}% \DeclareUnicodeCharacter{2293}{\ensuremath\sqcap}% \DeclareUnicodeCharacter{2294}{\ensuremath\sqcup}% \DeclareUnicodeCharacter{2295}{\ensuremath\oplus}% \DeclareUnicodeCharacter{2296}{\ensuremath\ominus}% \DeclareUnicodeCharacter{2297}{\ensuremath\otimes}% \DeclareUnicodeCharacter{2298}{\ensuremath\oslash}% \DeclareUnicodeCharacter{2299}{\ensuremath\odot}% \DeclareUnicodeCharacter{22A2}{\ensuremath\vdash}% \DeclareUnicodeCharacter{22A3}{\ensuremath\dashv}% \DeclareUnicodeCharacter{22A4}{\ensuremath\ptextop}% \DeclareUnicodeCharacter{22A5}{\ensuremath\bot}% \DeclareUnicodeCharacter{22A8}{\ensuremath\models}% \DeclareUnicodeCharacter{22C0}{\ensuremath\bigwedge}% \DeclareUnicodeCharacter{22C1}{\ensuremath\bigvee}% \DeclareUnicodeCharacter{22C2}{\ensuremath\bigcap}% \DeclareUnicodeCharacter{22C3}{\ensuremath\bigcup}% \DeclareUnicodeCharacter{22C4}{\ensuremath\diamond}% \DeclareUnicodeCharacter{22C5}{\ensuremath\cdot}% \DeclareUnicodeCharacter{22C6}{\ensuremath\star}% 
\DeclareUnicodeCharacter{22C8}{\ensuremath\bowtie}% \DeclareUnicodeCharacter{2308}{\ensuremath\lceil}% \DeclareUnicodeCharacter{2309}{\ensuremath\rceil}% \DeclareUnicodeCharacter{230A}{\ensuremath\lfloor}% \DeclareUnicodeCharacter{230B}{\ensuremath\rfloor}% \DeclareUnicodeCharacter{2322}{\ensuremath\frown}% \DeclareUnicodeCharacter{2323}{\ensuremath\smile}% % \DeclareUnicodeCharacter{25B3}{\ensuremath\triangle}% \DeclareUnicodeCharacter{25B7}{\ensuremath\triangleright}% \DeclareUnicodeCharacter{25BD}{\ensuremath\bigtriangledown}% \DeclareUnicodeCharacter{25C1}{\ensuremath\triangleleft}% \DeclareUnicodeCharacter{25C7}{\ensuremath\diamond}% \DeclareUnicodeCharacter{2660}{\ensuremath\spadesuit}% \DeclareUnicodeCharacter{2661}{\ensuremath\heartsuit}% \DeclareUnicodeCharacter{2662}{\ensuremath\diamondsuit}% \DeclareUnicodeCharacter{2663}{\ensuremath\clubsuit}% \DeclareUnicodeCharacter{266D}{\ensuremath\flat}% \DeclareUnicodeCharacter{266E}{\ensuremath\natural}% \DeclareUnicodeCharacter{266F}{\ensuremath\sharp}% \DeclareUnicodeCharacter{26AA}{\ensuremath\bigcirc}% \DeclareUnicodeCharacter{27B9}{\ensuremath\rangle}% \DeclareUnicodeCharacter{27C2}{\ensuremath\perp}% \DeclareUnicodeCharacter{27E8}{\ensuremath\langle}% \DeclareUnicodeCharacter{27F5}{\ensuremath\longleftarrow}% \DeclareUnicodeCharacter{27F6}{\ensuremath\longrightarrow}% \DeclareUnicodeCharacter{27F7}{\ensuremath\longleftrightarrow}% \DeclareUnicodeCharacter{27FC}{\ensuremath\longmapsto}% \DeclareUnicodeCharacter{29F5}{\ensuremath\setminus}% \DeclareUnicodeCharacter{2A00}{\ensuremath\bigodot}% \DeclareUnicodeCharacter{2A01}{\ensuremath\bigoplus}% \DeclareUnicodeCharacter{2A02}{\ensuremath\bigotimes}% \DeclareUnicodeCharacter{2A04}{\ensuremath\biguplus}% \DeclareUnicodeCharacter{2A06}{\ensuremath\bigsqcup}% \DeclareUnicodeCharacter{2A3F}{\ensuremath\amalg}% \DeclareUnicodeCharacter{2AAF}{\ensuremath\preceq}% \DeclareUnicodeCharacter{2AB0}{\ensuremath\succeq}% % \global\mathchardef\checkmark="1370% actually the 
square root sign \DeclareUnicodeCharacter{2713}{\ensuremath\checkmark}% }% end of \unicodechardefs % UTF-8 byte sequence (pdfTeX) definitions (replacing and @U command) % It makes the setting that replace UTF-8 byte sequence. \def\utfeightchardefs{% \let\DeclareUnicodeCharacter\DeclareUnicodeCharacterUTFviii \unicodechardefs } % Whether the active definitions of non-ASCII characters expand to % non-active tokens with the same character code. This is used to % write characters literally, instead of using active definitions for % printing the correct glyphs. \newif\ifpassthroughchars \passthroughcharsfalse % For native Unicode handling (XeTeX and LuaTeX), % provide a definition macro to replace/pass-through a Unicode character % \def\DeclareUnicodeCharacterNative#1#2{% \catcode"#1=\active \def\dodeclareunicodecharacternative##1##2##3{% \begingroup \uccode`\~="##2\relax \uppercase{\gdef~}{% \ifpassthroughchars ##1% \else ##3% \fi } \endgroup } \begingroup \uccode`\.="#1\relax \uppercase{\def\UTFNativeTmp{.}}% \expandafter\dodeclareunicodecharacternative\UTFNativeTmp{#1}{#2}% \endgroup } % Native Unicode handling (XeTeX and LuaTeX) character replacing definition. % It activates the setting that replaces Unicode characters. \def\nativeunicodechardefs{% \let\DeclareUnicodeCharacter\DeclareUnicodeCharacterNative \unicodechardefs } % For native Unicode handling (XeTeX and LuaTeX), % make the character token expand % to the sequences given in \unicodechardefs for printing. \def\DeclareUnicodeCharacterNativeAtU#1#2{% \def\UTFAtUTmp{#2} \expandafter\globallet\csname uni:#1\endcsname \UTFAtUTmp } % @U command definitions for native Unicode handling (XeTeX and LuaTeX). \def\nativeunicodechardefsatu{% \let\DeclareUnicodeCharacter\DeclareUnicodeCharacterNativeAtU \unicodechardefs } % US-ASCII character definitions. \def\asciichardefs{% nothing need be done \relax } % Define all Unicode characters we know about. This makes UTF-8 the default % input encoding and allows @U to work. 
\iftxinativeunicodecapable \nativeunicodechardefsatu \else \utfeightchardefs \fi \message{formatting,} \newdimen\defaultparindent \defaultparindent = 15pt \chapheadingskip = 15pt plus 4pt minus 2pt \secheadingskip = 12pt plus 3pt minus 2pt \subsecheadingskip = 9pt plus 2pt minus 2pt % Prevent underfull vbox error messages. \vbadness = 10000 % Don't be very finicky about underfull hboxes, either. \hbadness = 6666 % Following George Bush, get rid of widows and orphans. \widowpenalty=10000 \clubpenalty=10000 % Use TeX 3.0's \emergencystretch to help line breaking, but if we're % using an old version of TeX, don't do anything. We want the amount of % stretch added to depend on the line length, hence the dependence on % \hsize. We call this whenever the paper size is set. % \def\setemergencystretch{% \ifx\emergencystretch\thisisundefined % Allow us to assign to \emergencystretch anyway. \def\emergencystretch{\dimen0}% \else \emergencystretch = .15\hsize \fi } % Parameters in order: 1) textheight; 2) textwidth; % 3) voffset; 4) hoffset; 5) binding offset; 6) topskip; % 7) physical page height; 8) physical page width. % % We also call \setleading{\textleading}, so the caller should define % \textleading. The caller should also set \parskip. % \def\internalpagesizes#1#2#3#4#5#6#7#8{% \voffset = #3\relax \topskip = #6\relax \splittopskip = \topskip % \vsize = #1\relax \advance\vsize by \topskip \outervsize = \vsize \advance\outervsize by 2\topandbottommargin \txipageheight = \vsize % \hsize = #2\relax \outerhsize = \hsize \advance\outerhsize by 0.5in \txipagewidth = \hsize % \normaloffset = #4\relax \bindingoffset = #5\relax % \ifpdf \pdfpageheight #7\relax \pdfpagewidth #8\relax % if we don't reset these, they will remain at "1 true in" of % whatever layout pdftex was dumped with. 
\pdfhorigin = 1 true in \pdfvorigin = 1 true in \else \ifx\XeTeXrevision\thisisundefined \special{papersize=#8,#7}% \else \pdfpageheight #7\relax \pdfpagewidth #8\relax % XeTeX does not have \pdfhorigin and \pdfvorigin. \fi \fi % \setleading{\textleading} % \parindent = \defaultparindent \setemergencystretch } % @letterpaper (the default). \def\letterpaper{{\globaldefs = 1 \parskip = 3pt plus 2pt minus 1pt \textleading = 13.2pt % % If page is nothing but text, make it come out even. \internalpagesizes{607.2pt}{6in}% that's 46 lines {\voffset}{.25in}% {\bindingoffset}{36pt}% {11in}{8.5in}% }} % Use @smallbook to reset parameters for 7x9.25 trim size. \def\smallbook{{\globaldefs = 1 \parskip = 2pt plus 1pt \textleading = 12pt % \internalpagesizes{7.5in}{5in}% {-.2in}{0in}% {\bindingoffset}{16pt}% {9.25in}{7in}% % \lispnarrowing = 0.3in \tolerance = 700 \contentsrightmargin = 0pt \defbodyindent = .5cm }} % Use @smallerbook to reset parameters for 6x9 trim size. % (Just testing, parameters still in flux.) \def\smallerbook{{\globaldefs = 1 \parskip = 1.5pt plus 1pt \textleading = 12pt % \internalpagesizes{7.4in}{4.8in}% {-.2in}{-.4in}% {0pt}{14pt}% {9in}{6in}% % \lispnarrowing = 0.25in \tolerance = 700 \contentsrightmargin = 0pt \defbodyindent = .4cm }} % Use @afourpaper to print on European A4 paper. \def\afourpaper{{\globaldefs = 1 \parskip = 3pt plus 2pt minus 1pt \textleading = 13.2pt % % Double-side printing via postscript on Laserjet 4050 % prints double-sided nicely when \bindingoffset=10mm and \hoffset=-6mm. % To change the settings for a different printer or situation, adjust % \normaloffset until the front-side and back-side texts align. Then % do the same for \bindingoffset. 
You can set these for testing in % your texinfo source file like this: % @tex % \global\normaloffset = -6mm % \global\bindingoffset = 10mm % @end tex \internalpagesizes{673.2pt}{160mm}% that's 51 lines {\voffset}{\hoffset}% {\bindingoffset}{44pt}% {297mm}{210mm}% % \tolerance = 700 \contentsrightmargin = 0pt \defbodyindent = 5mm }} % Use @afivepaper to print on European A5 paper. % From romildo@urano.iceb.ufop.br, 2 July 2000. % He also recommends making @example and @lisp be small. \def\afivepaper{{\globaldefs = 1 \parskip = 2pt plus 1pt minus 0.1pt \textleading = 12.5pt % \internalpagesizes{160mm}{120mm}% {\voffset}{\hoffset}% {\bindingoffset}{8pt}% {210mm}{148mm}% % \lispnarrowing = 0.2in \tolerance = 800 \contentsrightmargin = 0pt \defbodyindent = 2mm \tableindent = 12mm }} % A specific text layout, 24x15cm overall, intended for A4 paper. \def\afourlatex{{\globaldefs = 1 \afourpaper \internalpagesizes{237mm}{150mm}% {\voffset}{4.6mm}% {\bindingoffset}{7mm}% {297mm}{210mm}% % % Must explicitly reset to 0 because we call \afourpaper. \globaldefs = 0 }} % Use @afourwide to print on A4 paper in landscape format. \def\afourwide{{\globaldefs = 1 \afourpaper \internalpagesizes{241mm}{165mm}% {\voffset}{-2.95mm}% {\bindingoffset}{7mm}% {297mm}{210mm}% \globaldefs = 0 }} \def\bsixpaper{{\globaldefs = 1 \afourpaper \internalpagesizes{140mm}{100mm}% {-6.35mm}{-12.7mm}% {\bindingoffset}{14pt}% {176mm}{125mm}% \let\SETdispenvsize=\smallword \lispnarrowing = 0.2in \globaldefs = 0 }} % @pagesizes TEXTHEIGHT[,TEXTWIDTH] % Perhaps we should allow setting the margins, \topskip, \parskip, % and/or leading, also. Or perhaps we should compute them somehow. 
% \parseargdef\pagesizes{\pagesizesyyy #1,,\finish} \def\pagesizesyyy#1,#2,#3\finish{{% \setbox0 = \hbox{\ignorespaces #2}\ifdim\wd0 > 0pt \hsize=#2\relax \fi \globaldefs = 1 % \parskip = 3pt plus 2pt minus 1pt \setleading{\textleading}% % \dimen0 = #1\relax \advance\dimen0 by 2.5in % default 1in margin above heading line % and 1.5in to include heading, footing and % bottom margin % \dimen2 = \hsize \advance\dimen2 by 2in % default to 1 inch margin on each side % \internalpagesizes{#1}{\hsize}% {\voffset}{\normaloffset}% {\bindingoffset}{44pt}% {\dimen0}{\dimen2}% }} % Set default to letter. % \letterpaper % Default value of \hfuzz, for suppressing warnings about overfull hboxes. \hfuzz = 1pt \message{and turning on texinfo input format.} \def^^L{\par} % remove \outer, so ^L can appear in an @comment % DEL is a comment character, in case @c does not suffice. \catcode`\^^? = 14 % Define macros to output various characters with catcode for normal text. \catcode`\"=\other \def\normaldoublequote{"} \catcode`\$=\other \def\normaldollar{$}%$ font-lock fix \catcode`\+=\other \def\normalplus{+} \catcode`\<=\other \def\normalless{<} \catcode`\>=\other \def\normalgreater{>} \catcode`\^=\other \def\normalcaret{^} \catcode`\_=\other \def\normalunderscore{_} \catcode`\|=\other \def\normalverticalbar{|} \catcode`\~=\other \def\normaltilde{~} % This macro is used to make a character print one way in \tt % (where it can probably be output as-is), and another way in other fonts, % where something hairier probably needs to be done. % % #1 is what to print if we are indeed using \tt; #2 is what to print % otherwise. Since all the Computer Modern typewriter fonts have zero % interword stretch (and shrink), and it is reasonable to expect all % typewriter fonts to have this, we can check that font parameter. % \def\ifusingtt#1#2{\ifdim \fontdimen3\font=0pt #1\else #2\fi} % Same as above, but check for italic font. 
Actually this also catches % non-italic slanted fonts since it is impossible to distinguish them from % italic fonts. But since this is only used by $ and it uses \sl anyway % this is not a problem. \def\ifusingit#1#2{\ifdim \fontdimen1\font>0pt #1\else #2\fi} % Set catcodes for Texinfo file % Active characters for printing the wanted glyph. % Most of these we simply print from the \tt font, but for some, we can % use math or other variants that look better in normal text. % \catcode`\"=\active \def\activedoublequote{{\tt\char34}} \let"=\activedoublequote \catcode`\~=\active \def\activetilde{{\tt\char126}} \let~ = \activetilde \chardef\hatchar=`\^ \catcode`\^=\active \def\activehat{{\tt \hatchar}} \let^ = \activehat \catcode`\_=\active \def_{\ifusingtt\normalunderscore\_} \def\_{\leavevmode \kern.07em \vbox{\hrule width.3em height.1ex}\kern .07em } \let\realunder=_ \catcode`\|=\active \def|{{\tt\char124}} \chardef \less=`\< \catcode`\<=\active \def\activeless{{\tt \less}}\let< = \activeless \chardef \gtr=`\> \catcode`\>=\active \def\activegtr{{\tt \gtr}}\let> = \activegtr \catcode`\+=\active \def+{{\tt \char 43}} \catcode`\$=\active \def${\ifusingit{{\sl\$}}\normaldollar}%$ font-lock fix \catcode`\-=\active \let-=\normaldash % used for headline/footline in the output routine, in case the page % breaks in the middle of an @tex block. \def\texinfochars{% \let< = \activeless \let> = \activegtr \let~ = \activetilde \let^ = \activehat \setregularquotes \let\b = \strong \let\i = \smartitalic % in principle, all other definitions in \tex have to be undone too. } % Used sometimes to turn off (effectively) the active characters even after % parsing them. \def\turnoffactive{% \normalturnoffactive \otherbackslash } \catcode`\@=0 % \backslashcurfont outputs one backslash character in current font, % as in \char`\\. \global\chardef\backslashcurfont=`\\ % \realbackslash is an actual character `\' with catcode other. 
{\catcode`\\=\other @gdef@realbackslash{\}} % In Texinfo, backslash is an active character; it prints the backslash % in fixed width font. \catcode`\\=\active % @ for escape char from now on. % Print a typewriter backslash. For math mode, we can't simply use % \backslashcurfont: the story here is that in math mode, the \char % of \backslashcurfont ends up printing the roman \ from the math symbol % font (because \char in math mode uses the \mathcode, and plain.tex % sets \mathcode`\\="026E). Hence we use an explicit \mathchar, % which is the decimal equivalent of "715c (class 7, e.g., use \fam; % ignored family value; char position "5C). We can't use " for the % usual hex value because it has already been made active. @def@ttbackslash{{@tt @ifmmode @mathchar29020 @else @backslashcurfont @fi}} @let@backslashchar = @ttbackslash % @backslashchar{} is for user documents. % \otherbackslash defines an active \ to be a literal `\' character with % catcode other. @gdef@otherbackslash{@let\=@realbackslash} % Same as @turnoffactive except outputs \ as {\tt\char`\\} instead of % the literal character `\'. % {@catcode`- = @active @gdef@normalturnoffactive{% @passthroughcharstrue @let-=@normaldash @let"=@normaldoublequote @let$=@normaldollar %$ font-lock fix @let+=@normalplus @let<=@normalless @let>=@normalgreater @let^=@normalcaret @let_=@normalunderscore @let|=@normalverticalbar @let~=@normaltilde @let\=@ttbackslash @setregularquotes @unsepspaces } } % If a .fmt file is being used, characters that might appear in a file % name cannot be active until we have parsed the command line. % So turn them off again, and have @fixbackslash turn them back on. @catcode`+=@other @catcode`@_=@other % \enablebackslashhack - allow file to begin `\input texinfo' % % If a .fmt file is being used, we don't want the `\input texinfo' to show up. % That is what \eatinput is for; after that, the `\' should revert to printing % a backslash. 
% If the file did not have a `\input texinfo', then it is turned off after % the first line; otherwise the first `\' in the file would cause an error. % This is used on the very last line of this file, texinfo.tex. % We also use @c to call @fixbackslash, in case ends of lines are hidden. { @catcode`@^=7 @catcode`@^^M=13@gdef@enablebackslashhack{% @global@let\ = @eatinput% @catcode`@^^M=13% @def@c{@fixbackslash@c}% % Definition for the newline at the end of this file. @def ^^M{@let^^M@secondlinenl}% % Definition for a newline in the main Texinfo file. @gdef @secondlinenl{@fixbackslash}% % In case the first line has a whole-line command on it @let@originalparsearg@parsearg @def@parsearg{@fixbackslash@originalparsearg} }} {@catcode`@^=7 @catcode`@^^M=13% @gdef@eatinput input texinfo#1^^M{@fixbackslash}} % Emergency active definition of newline, in case an active newline token % appears by mistake. {@catcode`@^=7 @catcode13=13% @gdef@enableemergencynewline{% @gdef^^M{% @par% %@par% }}} @gdef@fixbackslash{% @ifx\@eatinput @let\ = @ttbackslash @fi @catcode13=5 % regular end of line @enableemergencynewline @let@c=@comment @let@parsearg@originalparsearg % Also turn back on active characters that might appear in the input % file name, in case not using a pre-dumped format. @catcode`+=@active @catcode`@_=@active % % If texinfo.cnf is present on the system, read it. % Useful for site-wide @afourpaper, etc. This macro, @fixbackslash, gets % called at the beginning of every Texinfo file. Not opening texinfo.cnf % directly in this file, texinfo.tex, makes it possible to make a format % file for Texinfo. % @openin 1 texinfo.cnf @ifeof 1 @else @input texinfo.cnf @fi @closein 1 } % Say @foo, not \foo, in error messages. @escapechar = `@@ % These (along with & and #) are made active for url-breaking, so need % active definitions as the normal characters. @def@normaldot{.} @def@normalquest{?} @def@normalslash{/} % These look ok in all fonts, so just make them not special. 
% @hashchar{} gets its own user-level command, because of #line. @catcode`@& = @other @def@normalamp{&} @catcode`@# = @other @def@normalhash{#} @catcode`@% = @other @def@normalpercent{%} @let @hashchar = @normalhash @c Finally, make ` and ' active, so that txicodequoteundirected and @c txicodequotebacktick work right in, e.g., @w{@code{`foo'}}. If we @c don't make ` and ' active, @code will not get them as active chars. @c Do this last of all since we use ` in the previous @catcode assignments. @catcode`@'=@active @catcode`@`=@active @setregularquotes @c Local variables: @c eval: (add-hook 'before-save-hook 'time-stamp) @c page-delimiter: "^\\\\message\\|emacs-page" @c time-stamp-start: "def\\\\texinfoversion{" @c time-stamp-format: "%:y-%02m-%02d.%02H" @c time-stamp-end: "}" @c End: @c vim:sw=2: @enablebackslashhack autoconf-2.71/build-aux/gendocs.sh0000755000000000000000000004142514004621310014020 00000000000000#!/bin/sh -e # gendocs.sh -- generate a GNU manual in many formats. This script is # mentioned in maintain.texi. See the help message below for usage details. scriptversion=2021-01-01.00 # Copyright 2003-2021 Free Software Foundation, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # # Original author: Mohit Agarwal. # Send bug reports and any other correspondence to bug-gnulib@gnu.org. 
# # The latest version of this script, and the companion template, is # available from the Gnulib repository: # # https://git.savannah.gnu.org/cgit/gnulib.git/tree/build-aux/gendocs.sh # https://git.savannah.gnu.org/cgit/gnulib.git/tree/doc/gendocs_template # TODO: # - image importing was only implemented for HTML generated by # makeinfo. But it should be simple enough to adjust. # - images are not imported in the source tarball. All the needed # formats (PDF, PNG, etc.) should be included. prog=`basename "$0"` srcdir=`pwd` scripturl="https://git.savannah.gnu.org/cgit/gnulib.git/plain/build-aux/gendocs.sh" templateurl="https://git.savannah.gnu.org/cgit/gnulib.git/plain/doc/gendocs_template" : ${SETLANG="env LANG= LC_MESSAGES= LC_ALL= LANGUAGE="} : ${MAKEINFO="makeinfo"} : ${TEXI2DVI="texi2dvi"} : ${DOCBOOK2HTML="docbook2html"} : ${DOCBOOK2PDF="docbook2pdf"} : ${DOCBOOK2TXT="docbook2txt"} : ${GENDOCS_TEMPLATE_DIR="."} : ${PERL='perl'} : ${TEXI2HTML="texi2html"} unset CDPATH unset use_texi2html MANUAL_TITLE= PACKAGE= EMAIL=webmasters@gnu.org # please override with --email commonarg= # passed to all makeinfo/texi2html invcations. dirargs= # passed to all tools (-I dir). dirs= # -I directories. htmlarg="--css-ref=/software/gnulib/manual.css -c TOP_NODE_UP_URL=/manual" default_htmlarg=true infoarg=--no-split generate_ascii=true generate_html=true generate_info=true generate_tex=true outdir=manual source_extra= split=node srcfile= texarg="-t @finalout" version="gendocs.sh $scriptversion Copyright 2021 Free Software Foundation, Inc. There is NO warranty. You may redistribute this software under the terms of the GNU General Public License. For more information about these matters, see the files named COPYING." usage="Usage: $prog [OPTION]... PACKAGE MANUAL-TITLE Generate output in various formats from PACKAGE.texinfo (or .texi or .txi) source. 
See the GNU Maintainers document for a more extensive discussion: https://www.gnu.org/prep/maintain_toc.html Options: --email ADR use ADR as contact in generated web pages; always give this. -s SRCFILE read Texinfo from SRCFILE, instead of PACKAGE.{texinfo|texi|txi} -o OUTDIR write files into OUTDIR, instead of manual/. -I DIR append DIR to the Texinfo search path. --common ARG pass ARG in all invocations. --html ARG pass ARG to makeinfo or texi2html for HTML targets, instead of '$htmlarg'. --info ARG pass ARG to makeinfo for Info, instead of --no-split. --no-ascii skip generating the plain text output. --no-html skip generating the html output. --no-info skip generating the info output. --no-tex skip generating the dvi and pdf output. --source ARG include ARG in tar archive of sources. --split HOW make split HTML by node, section, chapter; default node. --tex ARG pass ARG to texi2dvi for DVI and PDF, instead of -t @finalout. --texi2html use texi2html to make HTML target, with all split versions. --docbook convert through DocBook too (xml, txt, html, pdf). --help display this help and exit successfully. --version display version information and exit successfully. Simple example: $prog --email bug-gnu-emacs@gnu.org emacs \"GNU Emacs Manual\" Typical sequence: cd PACKAGESOURCE/doc wget \"$scripturl\" wget \"$templateurl\" $prog --email BUGLIST MANUAL \"GNU MANUAL - One-line description\" Output will be in a new subdirectory \"manual\" (by default; use -o OUTDIR to override). Move all the new files into your web CVS tree, as explained in the Web Pages node of maintain.texi. Please use the --email ADDRESS option so your own bug-reporting address will be used in the generated HTML pages. MANUAL-TITLE is included as part of the HTML of the overall manual/index.html file. It should include the name of the package being documented. manual/index.html is created by substitution from the file $GENDOCS_TEMPLATE_DIR/gendocs_template. 
(Feel free to modify the generic template for your own purposes.) If you have several manuals, you'll need to run this script several times with different MANUAL values, specifying a different output directory with -o each time. Then write (by hand) an overall index.html with links to them all. If a manual's Texinfo sources are spread across several directories, first copy or symlink all Texinfo sources into a single directory. (Part of the script's work is to make a tar.gz of the sources.) As implied above, by default monolithic Info files are generated. If you want split Info, or other Info options, use --info to override. You can set the environment variables MAKEINFO, TEXI2DVI, TEXI2HTML, and PERL to control the programs that get executed, and GENDOCS_TEMPLATE_DIR to control where the gendocs_template file is looked for. With --docbook, the environment variables DOCBOOK2HTML, DOCBOOK2PDF, and DOCBOOK2TXT are also consulted. By default, makeinfo and texi2dvi are run in the default (English) locale, since that's the language of most Texinfo manuals. If you happen to have a non-English manual and non-English web site, see the SETLANG setting in the source. Email bug reports or enhancement requests to bug-gnulib@gnu.org. " while test $# -gt 0; do case $1 in -s) shift; srcfile=$1;; -o) shift; outdir=$1;; -I) shift; dirargs="$dirargs -I '$1'"; dirs="$dirs $1";; --common) shift; commonarg=$1;; --docbook) docbook=yes;; --email) shift; EMAIL=$1;; --html) shift; default_htmlarg=false; htmlarg=$1;; --info) shift; infoarg=$1;; --no-ascii) generate_ascii=false;; --no-html) generate_ascii=false;; --no-info) generate_info=false;; --no-tex) generate_tex=false;; --source) shift; source_extra=$1;; --split) shift; split=$1;; --tex) shift; texarg=$1;; --texi2html) use_texi2html=1;; --help) echo "$usage"; exit 0;; --version) echo "$version"; exit 0;; -*) echo "$0: Unknown option \`$1'." >&2 echo "$0: Try \`--help' for more information." 
>&2 exit 1;; *) if test -z "$PACKAGE"; then PACKAGE=$1 elif test -z "$MANUAL_TITLE"; then MANUAL_TITLE=$1 else echo "$0: extra non-option argument \`$1'." >&2 exit 1 fi;; esac shift done # makeinfo uses the dirargs, but texi2dvi doesn't. commonarg=" $dirargs $commonarg" # For most of the following, the base name is just $PACKAGE base=$PACKAGE if $default_htmlarg && test -n "$use_texi2html"; then # The legacy texi2html doesn't support TOP_NODE_UP_URL htmlarg="--css-ref=/software/gnulib/manual.css" fi if test -n "$srcfile"; then # but here, we use the basename of $srcfile base=`basename "$srcfile"` case $base in *.txi|*.texi|*.texinfo) base=`echo "$base"|sed 's/\.[texinfo]*$//'`;; esac PACKAGE=$base elif test -s "$srcdir/$PACKAGE.texinfo"; then srcfile=$srcdir/$PACKAGE.texinfo elif test -s "$srcdir/$PACKAGE.texi"; then srcfile=$srcdir/$PACKAGE.texi elif test -s "$srcdir/$PACKAGE.txi"; then srcfile=$srcdir/$PACKAGE.txi else echo "$0: cannot find .texinfo or .texi or .txi for $PACKAGE in $srcdir." >&2 exit 1 fi if test ! -r $GENDOCS_TEMPLATE_DIR/gendocs_template; then echo "$0: cannot read $GENDOCS_TEMPLATE_DIR/gendocs_template." >&2 echo "$0: it is available from $templateurl." >&2 exit 1 fi # Function to return size of $1 in something resembling kilobytes. calcsize() { size=`ls -ksl $1 | awk '{print $1}'` echo $size } # copy_images OUTDIR HTML-FILE... # ------------------------------- # Copy all the images needed by the HTML-FILEs into OUTDIR. # Look for them in . and the -I directories; this is simpler than what # makeinfo supports with -I, but hopefully it will suffice. copy_images() { local odir odir=$1 shift $PERL -n -e " BEGIN { \$me = '$prog'; \$odir = '$odir'; @dirs = qw(. $dirs); } " -e ' /<img src="(.*?)"/g && ++$need{$1}; END { #print "$me: @{[keys %need]}\n"; # for debugging, show images found. 
FILE: for my $f (keys %need) { for my $d (@dirs) { if (-f "$d/$f") { use File::Basename; my $dest = dirname ("$odir/$f"); # use File::Path; -d $dest || mkpath ($dest) || die "$me: cannot mkdir $dest: $!\n"; # use File::Copy; copy ("$d/$f", $dest) || die "$me: cannot copy $d/$f to $dest: $!\n"; next FILE; } } die "$me: $ARGV: cannot find image $f\n"; } } ' -- "$@" || exit 1 } case $outdir in /*) abs_outdir=$outdir;; *) abs_outdir=$srcdir/$outdir;; esac echo "Making output for $srcfile" echo " in `pwd`" mkdir -p "$outdir/" # if $generate_info; then cmd="$SETLANG $MAKEINFO -o $PACKAGE.info $commonarg $infoarg \"$srcfile\"" echo "Generating info... ($cmd)" rm -f $PACKAGE.info* # get rid of any strays eval "$cmd" tar czf "$outdir/$PACKAGE.info.tar.gz" $PACKAGE.info* ls -l "$outdir/$PACKAGE.info.tar.gz" info_tgz_size=`calcsize "$outdir/$PACKAGE.info.tar.gz"` # do not mv the info files, there's no point in having them available # separately on the web. fi # end info # if $generate_tex; then cmd="$SETLANG $TEXI2DVI $dirargs $texarg \"$srcfile\"" printf "\nGenerating dvi... ($cmd)\n" eval "$cmd" # compress/finish dvi: gzip -f -9 $PACKAGE.dvi dvi_gz_size=`calcsize $PACKAGE.dvi.gz` mv $PACKAGE.dvi.gz "$outdir/" ls -l "$outdir/$PACKAGE.dvi.gz" cmd="$SETLANG $TEXI2DVI --pdf $dirargs $texarg \"$srcfile\"" printf "\nGenerating pdf... ($cmd)\n" eval "$cmd" pdf_size=`calcsize $PACKAGE.pdf` mv $PACKAGE.pdf "$outdir/" ls -l "$outdir/$PACKAGE.pdf" fi # end tex (dvi + pdf) # if $generate_ascii; then opt="-o $PACKAGE.txt --no-split --no-headers $commonarg" cmd="$SETLANG $MAKEINFO $opt \"$srcfile\"" printf "\nGenerating ascii... ($cmd)\n" eval "$cmd" ascii_size=`calcsize $PACKAGE.txt` gzip -f -9 -c $PACKAGE.txt >"$outdir/$PACKAGE.txt.gz" ascii_gz_size=`calcsize "$outdir/$PACKAGE.txt.gz"` mv $PACKAGE.txt "$outdir/" ls -l "$outdir/$PACKAGE.txt" "$outdir/$PACKAGE.txt.gz" fi # if $generate_html; then # Split HTML at level $1. Used for texi2html. 
html_split() { opt="--split=$1 --node-files $commonarg $htmlarg" cmd="$SETLANG $TEXI2HTML --output $PACKAGE.html $opt \"$srcfile\"" printf "\nGenerating html by $1... ($cmd)\n" eval "$cmd" split_html_dir=$PACKAGE.html ( cd ${split_html_dir} || exit 1 ln -sf ${PACKAGE}.html index.html tar -czf "$abs_outdir/${PACKAGE}.html_$1.tar.gz" -- *.html ) eval html_$1_tgz_size=`calcsize "$outdir/${PACKAGE}.html_$1.tar.gz"` rm -f "$outdir"/html_$1/*.html mkdir -p "$outdir/html_$1/" mv ${split_html_dir}/*.html "$outdir/html_$1/" rmdir ${split_html_dir} } if test -z "$use_texi2html"; then opt="--no-split --html -o $PACKAGE.html $commonarg $htmlarg" cmd="$SETLANG $MAKEINFO $opt \"$srcfile\"" printf "\nGenerating monolithic html... ($cmd)\n" rm -rf $PACKAGE.html # in case a directory is left over eval "$cmd" html_mono_size=`calcsize $PACKAGE.html` gzip -f -9 -c $PACKAGE.html >"$outdir/$PACKAGE.html.gz" html_mono_gz_size=`calcsize "$outdir/$PACKAGE.html.gz"` copy_images "$outdir/" $PACKAGE.html mv $PACKAGE.html "$outdir/" ls -l "$outdir/$PACKAGE.html" "$outdir/$PACKAGE.html.gz" # Before Texinfo 5.0, makeinfo did not accept a --split=HOW option, # it just always split by node. So if we're splitting by node anyway, # leave it out. if test "x$split" = xnode; then split_arg= else split_arg=--split=$split fi # opt="--html -o $PACKAGE.html $split_arg $commonarg $htmlarg" cmd="$SETLANG $MAKEINFO $opt \"$srcfile\"" printf "\nGenerating html by $split... 
($cmd)\n" eval "$cmd" split_html_dir=$PACKAGE.html copy_images $split_html_dir/ $split_html_dir/*.html ( cd $split_html_dir || exit 1 tar -czf "$abs_outdir/$PACKAGE.html_$split.tar.gz" -- * ) eval \ html_${split}_tgz_size=`calcsize "$outdir/$PACKAGE.html_$split.tar.gz"` rm -rf "$outdir/html_$split/" mv $split_html_dir "$outdir/html_$split/" du -s "$outdir/html_$split/" ls -l "$outdir/$PACKAGE.html_$split.tar.gz" else # use texi2html: opt="--output $PACKAGE.html $commonarg $htmlarg" cmd="$SETLANG $TEXI2HTML $opt \"$srcfile\"" printf "\nGenerating monolithic html with texi2html... ($cmd)\n" rm -rf $PACKAGE.html # in case a directory is left over eval "$cmd" html_mono_size=`calcsize $PACKAGE.html` gzip -f -9 -c $PACKAGE.html >"$outdir/$PACKAGE.html.gz" html_mono_gz_size=`calcsize "$outdir/$PACKAGE.html.gz"` mv $PACKAGE.html "$outdir/" html_split node html_split chapter html_split section fi fi # end html # printf "\nMaking .tar.gz for sources...\n" d=`dirname $srcfile` ( cd "$d" srcfiles=`ls -d *.texinfo *.texi *.txi *.eps $source_extra 2>/dev/null` || true tar czfh "$abs_outdir/$PACKAGE.texi.tar.gz" $srcfiles ls -l "$abs_outdir/$PACKAGE.texi.tar.gz" ) texi_tgz_size=`calcsize "$outdir/$PACKAGE.texi.tar.gz"` # # Do everything again through docbook. if test -n "$docbook"; then opt="-o - --docbook $commonarg" cmd="$SETLANG $MAKEINFO $opt \"$srcfile\" >${srcdir}/$PACKAGE-db.xml" printf "\nGenerating docbook XML... ($cmd)\n" eval "$cmd" docbook_xml_size=`calcsize $PACKAGE-db.xml` gzip -f -9 -c $PACKAGE-db.xml >"$outdir/$PACKAGE-db.xml.gz" docbook_xml_gz_size=`calcsize "$outdir/$PACKAGE-db.xml.gz"` mv $PACKAGE-db.xml "$outdir/" split_html_db_dir=html_node_db opt="$commonarg -o $split_html_db_dir" cmd="$DOCBOOK2HTML $opt \"${outdir}/$PACKAGE-db.xml\"" printf "\nGenerating docbook HTML... 
($cmd)\n" eval "$cmd" ( cd ${split_html_db_dir} || exit 1 tar -czf "$abs_outdir/${PACKAGE}.html_node_db.tar.gz" -- *.html ) html_node_db_tgz_size=`calcsize "$outdir/${PACKAGE}.html_node_db.tar.gz"` rm -f "$outdir"/html_node_db/*.html mkdir -p "$outdir/html_node_db" mv ${split_html_db_dir}/*.html "$outdir/html_node_db/" rmdir ${split_html_db_dir} cmd="$DOCBOOK2TXT \"${outdir}/$PACKAGE-db.xml\"" printf "\nGenerating docbook ASCII... ($cmd)\n" eval "$cmd" docbook_ascii_size=`calcsize $PACKAGE-db.txt` mv $PACKAGE-db.txt "$outdir/" cmd="$DOCBOOK2PDF \"${outdir}/$PACKAGE-db.xml\"" printf "\nGenerating docbook PDF... ($cmd)\n" eval "$cmd" docbook_pdf_size=`calcsize $PACKAGE-db.pdf` mv $PACKAGE-db.pdf "$outdir/" fi # printf "\nMaking index.html for $PACKAGE...\n" if test -z "$use_texi2html"; then CONDS="/%%IF *HTML_SECTION%%/,/%%ENDIF *HTML_SECTION%%/d;\ /%%IF *HTML_CHAPTER%%/,/%%ENDIF *HTML_CHAPTER%%/d" else # should take account of --split here. CONDS="/%%ENDIF.*%%/d;/%%IF *HTML_SECTION%%/d;/%%IF *HTML_CHAPTER%%/d" fi curdate=`$SETLANG date '+%B %d, %Y'` sed \ -e "s!%%TITLE%%!$MANUAL_TITLE!g" \ -e "s!%%EMAIL%%!$EMAIL!g" \ -e "s!%%PACKAGE%%!$PACKAGE!g" \ -e "s!%%DATE%%!$curdate!g" \ -e "s!%%HTML_MONO_SIZE%%!$html_mono_size!g" \ -e "s!%%HTML_MONO_GZ_SIZE%%!$html_mono_gz_size!g" \ -e "s!%%HTML_NODE_TGZ_SIZE%%!$html_node_tgz_size!g" \ -e "s!%%HTML_SECTION_TGZ_SIZE%%!$html_section_tgz_size!g" \ -e "s!%%HTML_CHAPTER_TGZ_SIZE%%!$html_chapter_tgz_size!g" \ -e "s!%%INFO_TGZ_SIZE%%!$info_tgz_size!g" \ -e "s!%%DVI_GZ_SIZE%%!$dvi_gz_size!g" \ -e "s!%%PDF_SIZE%%!$pdf_size!g" \ -e "s!%%ASCII_SIZE%%!$ascii_size!g" \ -e "s!%%ASCII_GZ_SIZE%%!$ascii_gz_size!g" \ -e "s!%%TEXI_TGZ_SIZE%%!$texi_tgz_size!g" \ -e "s!%%DOCBOOK_HTML_NODE_TGZ_SIZE%%!$html_node_db_tgz_size!g" \ -e "s!%%DOCBOOK_ASCII_SIZE%%!$docbook_ascii_size!g" \ -e "s!%%DOCBOOK_PDF_SIZE%%!$docbook_pdf_size!g" \ -e "s!%%DOCBOOK_XML_SIZE%%!$docbook_xml_size!g" \ -e "s!%%DOCBOOK_XML_GZ_SIZE%%!$docbook_xml_gz_size!g" \ -e 
"s,%%SCRIPTURL%%,$scripturl,g" \ -e "s!%%SCRIPTNAME%%!$prog!g" \ -e "$CONDS" \ $GENDOCS_TEMPLATE_DIR/gendocs_template >"$outdir/index.html" echo "Done, see $outdir/ subdirectory for new files." # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-end: "$" # End: �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/build-aux/git-version-gen�������������������������������������������������������������0000755�0000000�0000000�00000017602�14004621310�015002� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh # Print a version string. scriptversion=2019-10-13.15; # UTC # Copyright (C) 2007-2021 Free Software Foundation, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # This script is derived from GIT-VERSION-GEN from GIT: https://git-scm.com/. 
# It may be run two ways: # - from a git repository in which the "git describe" command below # produces useful output (thus requiring at least one signed tag) # - from a non-git-repo directory containing a .tarball-version file, which # presumes this script is invoked like "./git-version-gen .tarball-version". # In order to use intra-version strings in your project, you will need two # separate generated version string files: # # .tarball-version - present only in a distribution tarball, and not in # a checked-out repository. Created with contents that were learned at # the last time autoconf was run, and used by git-version-gen. Must not # be present in either $(srcdir) or $(builddir) for git-version-gen to # give accurate answers during normal development with a checked out tree, # but must be present in a tarball when there is no version control system. # Therefore, it cannot be used in any dependencies. GNUmakefile has # hooks to force a reconfigure at distribution time to get the value # correct, without penalizing normal development with extra reconfigures. # # .version - present in a checked-out repository and in a distribution # tarball. Usable in dependencies, particularly for files that don't # want to depend on config.h but do want to track version changes. # Delete this file prior to any autoconf run where you want to rebuild # files to pick up a version string change; and leave it stale to # minimize rebuild time after unrelated changes to configure sources. # # As with any generated file in a VC'd directory, you should add # /.version to .gitignore, so that you don't accidentally commit it. # .tarball-version is never generated in a VC'd directory, so needn't # be listed there. # # Use the following line in your configure.ac, so that $(VERSION) will # automatically be up-to-date each time configure is run (and note that # since configure.ac no longer includes a version string, Makefile rules # should not depend on configure.ac for version updates). 
# # AC_INIT([GNU project], # m4_esyscmd([build-aux/git-version-gen .tarball-version]), # [bug-project@example]) # # Then use the following lines in your Makefile.am, so that .version # will be present for dependencies, and so that .version and # .tarball-version will exist in distribution tarballs. # # EXTRA_DIST = $(top_srcdir)/.version # BUILT_SOURCES = $(top_srcdir)/.version # $(top_srcdir)/.version: # echo $(VERSION) > $@-t && mv $@-t $@ # dist-hook: # echo $(VERSION) > $(distdir)/.tarball-version me=$0 year=`expr "$scriptversion" : '\([^-]*\)'` version="git-version-gen $scriptversion Copyright $year Free Software Foundation, Inc. There is NO warranty. You may redistribute this software under the terms of the GNU General Public License. For more information about these matters, see the files named COPYING." usage="\ Usage: $me [OPTION]... \$srcdir/.tarball-version [TAG-NORMALIZATION-SED-SCRIPT] Print a version string. Options: --prefix PREFIX prefix of git tags (default 'v') --fallback VERSION fallback version to use if \"git --version\" fails --help display this help and exit --version output version information and exit Running without arguments will suffice in most cases." prefix=v fallback= while test $# -gt 0; do case $1 in --help) echo "$usage"; exit 0;; --version) echo "$version"; exit 0;; --prefix) shift; prefix=${1?};; --fallback) shift; fallback=${1?};; -*) echo "$0: Unknown option '$1'." >&2 echo "$0: Try '--help' for more information." >&2 exit 1;; *) if test "x$tarball_version_file" = x; then tarball_version_file="$1" elif test "x$tag_sed_script" = x; then tag_sed_script="$1" else echo "$0: extra non-option argument '$1'." >&2 exit 1 fi;; esac shift done if test "x$tarball_version_file" = x; then echo "$usage" exit 1 fi tag_sed_script="${tag_sed_script:-s/x/x/}" nl=' ' # Avoid meddling by environment variable of the same name. v= v_from_git= # First see if there is a tarball-only version file. # then try "git describe", then default. 
if test -f $tarball_version_file then v=`cat $tarball_version_file` || v= case $v in *$nl*) v= ;; # reject multi-line output esac test "x$v" = x \ && echo "$0: WARNING: $tarball_version_file is damaged" 1>&2 fi if test "x$v" != x then : # use $v # Otherwise, if there is at least one git commit involving the working # directory, and "git describe" output looks sensible, use that to # derive a version string. elif test "`git log -1 --pretty=format:x . 2>&1`" = x \ && v=`git describe --abbrev=4 --match="$prefix*" HEAD 2>/dev/null \ || git describe --abbrev=4 HEAD 2>/dev/null` \ && v=`printf '%s\n' "$v" | sed "$tag_sed_script"` \ && case $v in $prefix[0-9]*) ;; *) (exit 1) ;; esac then # Is this a new git that lists number of commits since the last # tag or the previous older version that did not? # Newer: v6.10-77-g0f8faeb # Older: v6.10-g0f8faeb vprefix=`expr "X$v" : 'X\(.*\)-g[^-]*$'` || vprefix=$v case $vprefix in *-*) : git describe is probably okay three part flavor ;; *) : git describe is older two part flavor # Recreate the number of commits and rewrite such that the # result is the same as if we were using the newer version # of git describe. vtag=`echo "$v" | sed 's/-.*//'` commit_list=`git rev-list "$vtag"..HEAD 2>/dev/null` \ || { commit_list=failed; echo "$0: WARNING: git rev-list failed" 1>&2; } numcommits=`echo "$commit_list" | wc -l` v=`echo "$v" | sed "s/\(.*\)-\(.*\)/\1-$numcommits-\2/"`; test "$commit_list" = failed && v=UNKNOWN ;; esac # Change the penultimate "-" to ".", for version-comparing tools. # Remove the "g" to save a byte. v=`echo "$v" | sed 's/-\([^-]*\)-g\([^-]*\)$/.\1-\2/'`; v_from_git=1 elif test "x$fallback" = x || git --version >/dev/null 2>&1; then v=UNKNOWN else v=$fallback fi v=`echo "$v" |sed "s/^$prefix//"` # Test whether to append the "-dirty" suffix only if the version # string we're using came from git. I.e., skip the test if it's "UNKNOWN" # or if it came from .tarball-version. 
if test "x$v_from_git" != x; then # Don't declare a version "dirty" merely because a timestamp has changed. git update-index --refresh > /dev/null 2>&1 dirty=`exec 2>/dev/null;git diff-index --name-only HEAD` || dirty= case "$dirty" in '') ;; *) # Append the suffix only if there isn't one already. case $v in *-dirty) ;; *) v="$v-dirty" ;; esac ;; esac fi # Omit the trailing newline, so that m4_esyscmd can use the result directly. printf %s "$v" # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: ������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/build-aux/announce-gen����������������������������������������������������������������0000755�0000000�0000000�00000040753�14004621310�014345� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh #! -*-perl-*- # Generate a release announcement message. # Copyright (C) 2002-2021 Free Software Foundation, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # Written by Jim Meyering # This is a prologue that allows to run a perl script as an executable # on systems that are compliant to a POSIX version before POSIX:2017. # On such systems, the usual invocation of an executable through execlp() # or execvp() fails with ENOEXEC if it is a script that does not start # with a #! line. The script interpreter mentioned in the #! line has # to be /bin/sh, because on GuixSD systems that is the only program that # has a fixed file name. The second line is essential for perl and is # also useful for editing this file in Emacs. The next two lines below # are valid code in both sh and perl. When executed by sh, they re-execute # the script through the perl program found in $PATH. The '-x' option # is essential as well; without it, perl would re-execute the script # through /bin/sh. When executed by perl, the next two lines are a no-op. eval 'exec perl -wSx "$0" "$@"' if 0; my $VERSION = '2020-05-10 16:13'; # UTC # The definition above must lie within the first 8 lines in order # for the Emacs time-stamp write hook (at end) to update it. # If you change this file with Emacs, please let the write hook # do its job. Otherwise, update this string manually. use strict; use Getopt::Long; use POSIX qw(strftime); (my $ME = $0) =~ s|.*/||; my %valid_release_types = map {$_ => 1} qw (alpha beta stable); my @archive_suffixes = qw (tar.gz tar.bz2 tar.lz tar.lzma tar.xz); my %digest_classes = ( 'md5' => (eval { require Digest::MD5; } and 'Digest::MD5'), 'sha1' => ((eval { require Digest::SHA; } and 'Digest::SHA') or (eval { require Digest::SHA1; } and 'Digest::SHA1')) ); my $srcdir = '.'; sub usage ($) { my ($exit_code) = @_; my $STREAM = ($exit_code == 0 ? 
*STDOUT : *STDERR); if ($exit_code != 0) { print $STREAM "Try '$ME --help' for more information.\n"; } else { my @types = sort keys %valid_release_types; print $STREAM <<EOF; Usage: $ME [OPTIONS] Generate an announcement message. Run this from builddir. OPTIONS: These options must be specified: --release-type=TYPE TYPE must be one of @types --package-name=PACKAGE_NAME --previous-version=VER --current-version=VER --gpg-key-id=ID The GnuPG ID of the key used to sign the tarballs --url-directory=URL_DIR The following are optional: --news=NEWS_FILE include the NEWS section about this release from this NEWS_FILE; accumulates. --srcdir=DIR where to find the NEWS_FILEs (default: $srcdir) --bootstrap-tools=TOOL_LIST a comma-separated list of tools, e.g., autoconf,automake,bison,gnulib --gnulib-version=VERSION report VERSION as the gnulib version, where VERSION is the result of running git describe in the gnulib source directory. required if gnulib is in TOOL_LIST. --no-print-checksums do not emit MD5 or SHA1 checksums --archive-suffix=SUF add SUF to the list of archive suffixes --mail-headers=HEADERS a space-separated list of mail headers, e.g., To: x\@example.com Cc: y-announce\@example.com,... --help display this help and exit --version output version information and exit EOF } exit $exit_code; } =item C<%size> = C<sizes (@file)> Compute the sizes of the C<@file> and return them as a hash. Return C<undef> if one of the computation failed. =cut sub sizes (@) { my (@file) = @_; my $fail = 0; my %res; foreach my $f (@file) { my $cmd = "du -h $f"; my $t = `$cmd`; # FIXME-someday: give a better diagnostic, a la $PROCESS_STATUS $@ and (warn "command failed: '$cmd'\n"), $fail = 1; chomp $t; $t =~ s/^\s*([\d.]+[MkK]).*/${1}B/; $res{$f} = $t; } return $fail ? undef : %res; } =item C<print_locations ($title, \@url, \%size, @file) Print a section C<$title> dedicated to the list of <@file>, which sizes are stored in C<%size>, and which are available from the C<@url>. 
=cut sub print_locations ($\@\%@) { my ($title, $url, $size, @file) = @_; print "Here are the $title:\n"; foreach my $url (@{$url}) { for my $file (@file) { print " $url/$file"; print " (", $$size{$file}, ")" if exists $$size{$file}; print "\n"; } } print "\n"; } =item C<print_checksums (@file) Print the MD5 and SHA1 signature section for each C<@file>. =cut sub print_checksums (@) { my (@file) = @_; print "Here are the MD5 and SHA1 checksums:\n"; print "\n"; foreach my $meth (qw (md5 sha1)) { my $class = $digest_classes{$meth} or next; foreach my $f (@file) { open IN, '<', $f or die "$ME: $f: cannot open for reading: $!\n"; binmode IN; my $dig = $class->new->addfile(*IN)->hexdigest; close IN; print "$dig $f\n"; } } print "\n"; } =item C<print_news_deltas ($news_file, $prev_version, $curr_version) Print the section of the NEWS file C<$news_file> addressing changes between versions C<$prev_version> and C<$curr_version>. =cut sub print_news_deltas ($$$) { my ($news_file, $prev_version, $curr_version) = @_; my $news_name = $news_file; $news_name =~ s|^\Q$srcdir\E/||; print "\n$news_name\n\n"; # Print all lines from $news_file, starting with the first one # that mentions $curr_version up to but not including # the first occurrence of $prev_version. my $in_items; my $re_prefix = qr/(?:\* )?(?:Noteworthy c|Major c|C)(?i:hanges)/; my $found_news; open NEWS, '<', $news_file or die "$ME: $news_file: cannot open for reading: $!\n"; while (defined (my $line = <NEWS>)) { if ( ! $in_items) { # Match lines like these: # * Major changes in release 5.0.1: # * Noteworthy changes in release 6.6 (2006-11-22) [stable] $line =~ /^$re_prefix.*(?:[^\d.]|$)\Q$curr_version\E(?:[^\d.]|$)/o or next; $in_items = 1; print $line; } else { # This regexp must not match version numbers in NEWS items. # For example, they might well say "introduced in 4.5.5", # and we don't want that to match. 
$line =~ /^$re_prefix.*(?:[^\d.]|$)\Q$prev_version\E(?:[^\d.]|$)/o and last; print $line; $line =~ /\S/ and $found_news = 1; } } close NEWS; $in_items or die "$ME: $news_file: no matching lines for '$curr_version'\n"; $found_news or die "$ME: $news_file: no news item found for '$curr_version'\n"; } sub print_changelog_deltas ($$) { my ($package_name, $prev_version) = @_; # Print new ChangeLog entries. # First find all CVS-controlled ChangeLog files. use File::Find; my @changelog; find ({wanted => sub {$_ eq 'ChangeLog' && -d 'CVS' and push @changelog, $File::Find::name}}, '.'); # If there are no ChangeLog files, we're done. @changelog or return; my %changelog = map {$_ => 1} @changelog; # Reorder the list of files so that if there are ChangeLog # files in the specified directories, they're listed first, # in this order: my @dir = qw ( . src lib m4 config doc ); # A typical @changelog array might look like this: # ./ChangeLog # ./po/ChangeLog # ./m4/ChangeLog # ./lib/ChangeLog # ./doc/ChangeLog # ./config/ChangeLog my @reordered; foreach my $d (@dir) { my $dot_slash = $d eq '.' ? $d : "./$d"; my $target = "$dot_slash/ChangeLog"; delete $changelog{$target} and push @reordered, $target; } # Append any remaining ChangeLog files. push @reordered, sort keys %changelog; # Remove leading './'. @reordered = map { s!^\./!!; $_ } @reordered; print "\nChangeLog entries:\n\n"; # print join ("\n", @reordered), "\n"; $prev_version =~ s/\./_/g; my $prev_cvs_tag = "\U$package_name\E-$prev_version"; my $cmd = "cvs -n diff -u -r$prev_cvs_tag -rHEAD @reordered"; open DIFF, '-|', $cmd or die "$ME: cannot run '$cmd': $!\n"; # Print two types of lines, making minor changes: # Lines starting with '+++ ', e.g., # +++ ChangeLog 22 Feb 2003 16:52:51 -0000 1.247 # and those starting with '+'. # Don't print the others. 
my $prev_printed_line_empty = 1; while (defined (my $line = <DIFF>)) { if ($line =~ /^\+\+\+ /) { my $separator = "*"x70 ."\n"; $line =~ s///; $line =~ s/\s.*//; $prev_printed_line_empty or print "\n"; print $separator, $line, $separator; } elsif ($line =~ /^\+/) { $line =~ s///; print $line; $prev_printed_line_empty = ($line =~ /^$/); } } close DIFF; # The exit code should be 1. # Allow in case there are no modified ChangeLog entries. $? == 256 || $? == 128 or warn "warning: '$cmd' had unexpected exit code or signal ($?)\n"; } sub get_tool_versions ($$) { my ($tool_list, $gnulib_version) = @_; @$tool_list or return (); my $fail; my @tool_version_pair; foreach my $t (@$tool_list) { if ($t eq 'gnulib') { push @tool_version_pair, ucfirst $t . ' ' . $gnulib_version; next; } # Assume that the last "word" on the first line of # 'tool --version' output is the version string. my ($first_line, undef) = split ("\n", `$t --version`); if ($first_line =~ /.* (\d[\w.-]+)$/) { $t = ucfirst $t; push @tool_version_pair, "$t $1"; } else { defined $first_line and $first_line = ''; warn "$t: unexpected --version output\n:$first_line"; $fail = 1; } } $fail and exit 1; return @tool_version_pair; } { # Use the C locale so that, for instance, "du" does not # print "1,2" instead of "1.2", which would confuse our regexps. $ENV{LC_ALL} = "C"; my $mail_headers; my $release_type; my $package_name; my $prev_version; my $curr_version; my $gpg_key_id; my @url_dir_list; my @news_file; my $bootstrap_tools; my $gnulib_version; my $print_checksums_p = 1; # Reformat the warnings before displaying them. local $SIG{__WARN__} = sub { my ($msg) = @_; # Warnings from GetOptions. 
$msg =~ s/Option (\w)/option --$1/; warn "$ME: $msg"; }; GetOptions ( 'mail-headers=s' => \$mail_headers, 'release-type=s' => \$release_type, 'package-name=s' => \$package_name, 'previous-version=s' => \$prev_version, 'current-version=s' => \$curr_version, 'gpg-key-id=s' => \$gpg_key_id, 'url-directory=s' => \@url_dir_list, 'news=s' => \@news_file, 'srcdir=s' => \$srcdir, 'bootstrap-tools=s' => \$bootstrap_tools, 'gnulib-version=s' => \$gnulib_version, 'print-checksums!' => \$print_checksums_p, 'archive-suffix=s' => \@archive_suffixes, help => sub { usage 0 }, version => sub { print "$ME version $VERSION\n"; exit }, ) or usage 1; my $fail = 0; # Ensure that each required option is specified. $release_type or (warn "release type not specified\n"), $fail = 1; $package_name or (warn "package name not specified\n"), $fail = 1; $prev_version or (warn "previous version string not specified\n"), $fail = 1; $curr_version or (warn "current version string not specified\n"), $fail = 1; $gpg_key_id or (warn "GnuPG key ID not specified\n"), $fail = 1; @url_dir_list or (warn "URL directory name(s) not specified\n"), $fail = 1; my @tool_list = split ',', $bootstrap_tools if $bootstrap_tools; grep (/^gnulib$/, @tool_list) ^ defined $gnulib_version and (warn "when specifying gnulib as a tool, you must also specify\n" . "--gnulib-version=V, where V is the result of running git describe\n" . "in the gnulib source directory.\n"), $fail = 1; !$release_type || exists $valid_release_types{$release_type} or (warn "'$release_type': invalid release type\n"), $fail = 1; @ARGV and (warn "too many arguments:\n", join ("\n", @ARGV), "\n"), $fail = 1; $fail and usage 1; my $my_distdir = "$package_name-$curr_version"; my $xd = "$package_name-$prev_version-$curr_version.xdelta"; my @candidates = map { "$my_distdir.$_" } @archive_suffixes; my @tarballs = grep {-f $_} @candidates; @tarballs or die "$ME: none of " . join(', ', @candidates) . 
" were found\n"; my @sizable = @tarballs; -f $xd and push @sizable, $xd; my %size = sizes (@sizable); %size or exit 1; my $headers = ''; if (defined $mail_headers) { ($headers = $mail_headers) =~ s/\s+(\S+:)/\n$1/g; $headers .= "\n"; } # The markup is escaped as <\# so that when this script is sent by # mail (or part of a diff), Gnus is not triggered. print <<EOF; ${headers}Subject: $my_distdir released [$release_type] <\#secure method=pgpmime mode=sign> FIXME: put comments here EOF if (@url_dir_list == 1 && @tarballs == 1) { # When there's only one tarball and one URL, use a more concise form. my $m = "$url_dir_list[0]/$tarballs[0]"; print "Here are the compressed sources and a GPG detached signature[*]:\n" . " $m\n" . " $m.sig\n\n"; } else { print_locations ("compressed sources", @url_dir_list, %size, @tarballs); -f $xd and print_locations ("xdelta diffs (useful? if so, " . "please tell bug-gnulib\@gnu.org)", @url_dir_list, %size, $xd); my @sig_files = map { "$_.sig" } @tarballs; print_locations ("GPG detached signatures[*]", @url_dir_list, %size, @sig_files); } if ($url_dir_list[0] =~ "gnu\.org") { print "Use a mirror for higher download bandwidth:\n"; if (@tarballs == 1 && $url_dir_list[0] =~ m!https://ftp\.gnu\.org/gnu/!) { (my $m = "$url_dir_list[0]/$tarballs[0]") =~ s!https://ftp\.gnu\.org/gnu/!https://ftpmirror\.gnu\.org/!; print " $m\n" . " $m.sig\n\n"; } else { print " https://www.gnu.org/order/ftp.html\n\n"; } } $print_checksums_p and print_checksums (@sizable); print <<EOF; [*] Use a .sig file to verify that the corresponding file (without the .sig suffix) is intact. First, be sure to download both the .sig file and the corresponding tarball. Then, run a command like this: gpg --verify $tarballs[0].sig If that command fails because you don't have the required public key, then run this command to import it: gpg --keyserver keys.gnupg.net --recv-keys $gpg_key_id and rerun the 'gpg --verify' command. 
EOF my @tool_versions = get_tool_versions (\@tool_list, $gnulib_version); @tool_versions and print "\nThis release was bootstrapped with the following tools:", join ('', map {"\n $_"} @tool_versions), "\n"; print_news_deltas ($_, $prev_version, $curr_version) foreach @news_file; $release_type eq 'stable' or print_changelog_deltas ($package_name, $prev_version); exit 0; } ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## mode: perl ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## perl-extra-newline-before-brace: t ## perl-merge-trailing-else: nil ## eval: (add-hook 'before-save-hook 'time-stamp) ## time-stamp-line-limit: 50 ## time-stamp-start: "my $VERSION = '" ## time-stamp-format: "%:y-%02m-%02d %02H:%02M" ## time-stamp-time-zone: "UTC0" ## time-stamp-end: "'; # UTC" ## End: ���������������������autoconf-2.71/build-aux/gnupload��������������������������������������������������������������������0000755�0000000�0000000�00000032113�14004621310�013570� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh # Sign files and upload them. scriptversion=2018-05-19.18; # UTC # Copyright (C) 2004-2021 Free Software Foundation, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # Originally written by Alexandre Duret-Lutz <adl@gnu.org>. # The master copy of this file is maintained in the gnulib Git repository. # Please send bug reports and feature requests to bug-gnulib@gnu.org. set -e GPG=gpg # Choose the proper version of gpg, so as to avoid a # "gpg-agent is not available in this session" error # when gpg-agent is version 2 but gpg is still version 1. # FIXME-2020: remove, once all major distros ship gpg version 2 as /usr/bin/gpg gpg_agent_version=`(gpg-agent --version) 2>/dev/null | sed -e '2,$d' -e 's/^[^0-9]*//'` case "$gpg_agent_version" in 2.*) gpg_version=`(gpg --version) 2>/dev/null | sed -e '2,$d' -e 's/^[^0-9]*//'` case "$gpg_version" in 1.*) if (type gpg2) >/dev/null 2>/dev/null; then # gpg2 is present. GPG=gpg2 else # gpg2 is missing. Ubuntu users should install the package 'gnupg2'. echo "WARNING: Using 'gpg', which is too old. You should install 'gpg2'." 1>&2 fi ;; esac ;; esac GPG="${GPG} --batch --no-tty" conffile=.gnuploadrc to= dry_run=false replace= symlink_files= delete_files= delete_symlinks= collect_var= dbg= nl=' ' usage="Usage: $0 [OPTION]... [CMD] FILE... [[CMD] FILE...] Sign all FILES, and process them at the destinations specified with --to. If CMD is not given, it defaults to uploading. See examples below. 
Commands: --delete delete FILES from destination --symlink create symbolic links --rmsymlink remove symbolic links -- treat the remaining arguments as files to upload Options: --to DEST specify a destination DEST for FILES (multiple --to options are allowed) --user NAME sign with key NAME --replace allow replacements of existing files --symlink-regex[=EXPR] use sed script EXPR to compute symbolic link names -n, --dry-run do nothing, show what would have been done (including the constructed directive file) --version output version information and exit -h, --help print this help text and exit If --symlink-regex is given without EXPR, then the link target name is created by replacing the version information with '-latest', e.g.: foo-1.3.4.tar.gz -> foo-latest.tar.gz Recognized destinations are: alpha.gnu.org:DIRECTORY savannah.gnu.org:DIRECTORY savannah.nongnu.org:DIRECTORY ftp.gnu.org:DIRECTORY build directive files and upload files by FTP download.gnu.org.ua:{alpha|ftp}/DIRECTORY build directive files and upload files by SFTP [user@]host:DIRECTORY upload files with scp Options and commands are applied in order. If the file $conffile exists in the current working directory, its contents are prepended to the actual command line options. Use this to keep your defaults. Comments (#) and empty lines in $conffile are allowed. <https://www.gnu.org/prep/maintain/html_node/Automated-FTP-Uploads.html> gives some further background. Examples: 1. Upload foobar-1.0.tar.gz to ftp.gnu.org: gnupload --to ftp.gnu.org:foobar foobar-1.0.tar.gz 2. Upload foobar-1.0.tar.gz and foobar-1.0.tar.xz to ftp.gnu.org: gnupload --to ftp.gnu.org:foobar foobar-1.0.tar.gz foobar-1.0.tar.xz 3. Same as above, and also create symbolic links to foobar-latest.tar.*: gnupload --to ftp.gnu.org:foobar \\ --symlink-regex \\ foobar-1.0.tar.gz foobar-1.0.tar.xz 4. 
Create a symbolic link foobar-latest.tar.gz -> foobar-1.0.tar.gz and likewise for the corresponding .sig file: gnupload --to ftp.gnu.org:foobar \\ --symlink foobar-1.0.tar.gz foobar-latest.tar.gz \\ foobar-1.0.tar.gz.sig foobar-latest.tar.gz.sig or (equivalent): gnupload --to ftp.gnu.org:foobar \\ --symlink foobar-1.0.tar.gz foobar-latest.tar.gz \\ --symlink foobar-1.0.tar.gz.sig foobar-latest.tar.gz.sig 5. Upload foobar-0.9.90.tar.gz to two sites: gnupload --to alpha.gnu.org:foobar \\ --to sources.redhat.com:~ftp/pub/foobar \\ foobar-0.9.90.tar.gz 6. Delete oopsbar-0.9.91.tar.gz and upload foobar-0.9.91.tar.gz (the -- terminates the list of files to delete): gnupload --to alpha.gnu.org:foobar \\ --to sources.redhat.com:~ftp/pub/foobar \\ --delete oopsbar-0.9.91.tar.gz \\ -- foobar-0.9.91.tar.gz gnupload executes a program ncftpput to do the transfers; if you don't happen to have an ncftp package installed, the ncftpput-ftp script in the build-aux/ directory of the gnulib package (https://savannah.gnu.org/projects/gnulib) may serve as a replacement. Send patches and bug reports to <bug-gnulib@gnu.org>." # Read local configuration file if test -r "$conffile"; then echo "$0: Reading configuration file $conffile" conf=`sed 's/#.*$//;/^$/d' "$conffile" | tr "\015$nl" ' '` eval set x "$conf \"\$@\"" shift fi while test -n "$1"; do case $1 in -*) collect_var= case $1 in -h | --help) echo "$usage" exit $? ;; --to) if test -z "$2"; then echo "$0: Missing argument for --to" 1>&2 exit 1 elif echo "$2" | grep 'ftp-upload\.gnu\.org' >/dev/null; then echo "$0: Use ftp.gnu.org:PKGNAME or alpha.gnu.org:PKGNAME" >&2 echo "$0: for the destination, not ftp-upload.gnu.org (which" >&2 echo "$0: is used for direct ftp uploads, not with gnupload)." >&2 echo "$0: See --help and its examples if need be." 
>&2 exit 1 else to="$to $2" shift fi ;; --user) if test -z "$2"; then echo "$0: Missing argument for --user" 1>&2 exit 1 else GPG="$GPG --local-user $2" shift fi ;; --delete) collect_var=delete_files ;; --replace) replace="replace: true" ;; --rmsymlink) collect_var=delete_symlinks ;; --symlink-regex=*) symlink_expr=`expr "$1" : '[^=]*=\(.*\)'` ;; --symlink-regex) symlink_expr='s|-[0-9][0-9\.]*\(-[0-9][0-9]*\)\{0,1\}\.|-latest.|' ;; --symlink) collect_var=symlink_files ;; -n | --dry-run) dry_run=: ;; --version) echo "gnupload $scriptversion" exit $? ;; --) shift break ;; -*) echo "$0: Unknown option '$1', try '$0 --help'" 1>&2 exit 1 ;; esac ;; *) if test -z "$collect_var"; then break else eval "$collect_var=\"\$$collect_var $1\"" fi ;; esac shift done dprint() { echo "Running $* ..." } if $dry_run; then dbg=dprint fi if test -z "$to"; then echo "$0: Missing destination sites" >&2 exit 1 fi if test -n "$symlink_files"; then x=`echo "$symlink_files" | sed 's/[^ ]//g;s/ //g'` if test -n "$x"; then echo "$0: Odd number of symlink arguments" >&2 exit 1 fi fi if test $# = 0; then if test -z "${symlink_files}${delete_files}${delete_symlinks}"; then echo "$0: No file to upload" 1>&2 exit 1 fi else # Make sure all files exist. We don't want to ask # for the passphrase if the script will fail. for file do if test ! -f $file; then echo "$0: Cannot find '$file'" 1>&2 exit 1 elif test -n "$symlink_expr"; then linkname=`echo $file | sed "$symlink_expr"` if test -z "$linkname"; then echo "$0: symlink expression produces empty results" >&2 exit 1 elif test "$linkname" = $file; then echo "$0: symlink expression does not alter file name" >&2 exit 1 fi fi done fi # Make sure passphrase is not exported in the environment. unset passphrase unset passphrase_fd_0 GNUPGHOME=${GNUPGHOME:-$HOME/.gnupg} # Reset PATH to be sure that echo is a built-in. 
We will later use # 'echo $passphrase' to output the passphrase, so it is important that # it is a built-in (third-party programs tend to appear in 'ps' # listings with their arguments...). # Remember this script runs with 'set -e', so if echo is not built-in # it will exit now. if $dry_run || grep -q "^use-agent" $GNUPGHOME/gpg.conf; then :; else PATH=/empty echo -n "Enter GPG passphrase: " stty -echo read -r passphrase stty echo echo passphrase_fd_0="--passphrase-fd 0" fi if test $# -ne 0; then for file do echo "Signing $file ..." rm -f $file.sig echo "$passphrase" | $dbg $GPG $passphrase_fd_0 -ba -o $file.sig $file done fi # mkdirective DESTDIR BASE FILE STMT # Arguments: See upload, below mkdirective () { stmt="$4" if test -n "$3"; then stmt=" filename: $3$stmt" fi cat >${2}.directive<<EOF version: 1.2 directory: $1 comment: gnupload v. $scriptversion$stmt EOF if $dry_run; then echo "File ${2}.directive:" cat ${2}.directive echo "File ${2}.directive:" | sed 's/./-/g' fi } mksymlink () { while test $# -ne 0 do echo "symlink: $1 $2" shift shift done } # upload DEST DESTDIR BASE FILE STMT FILES # Arguments: # DEST Destination site; # DESTDIR Destination directory; # BASE Base name for the directive file; # FILE Name of the file to distribute (may be empty); # STMT Additional statements for the directive file; # FILES List of files to upload. 
upload () { dest=$1 destdir=$2 base=$3 file=$4 stmt=$5 files=$6 rm -f $base.directive $base.directive.asc case $dest in alpha.gnu.org:*) mkdirective "$destdir" "$base" "$file" "$stmt" echo "$passphrase" | $dbg $GPG $passphrase_fd_0 --clearsign $base.directive $dbg ncftpput ftp-upload.gnu.org /incoming/alpha $files $base.directive.asc ;; ftp.gnu.org:*) mkdirective "$destdir" "$base" "$file" "$stmt" echo "$passphrase" | $dbg $GPG $passphrase_fd_0 --clearsign $base.directive $dbg ncftpput ftp-upload.gnu.org /incoming/ftp $files $base.directive.asc ;; savannah.gnu.org:*) if test -z "$files"; then echo "$0: warning: standalone directives not applicable for $dest" >&2 fi $dbg ncftpput savannah.gnu.org /incoming/savannah/$destdir $files ;; savannah.nongnu.org:*) if test -z "$files"; then echo "$0: warning: standalone directives not applicable for $dest" >&2 fi $dbg ncftpput savannah.nongnu.org /incoming/savannah/$destdir $files ;; download.gnu.org.ua:alpha/*|download.gnu.org.ua:ftp/*) destdir_p1=`echo "$destdir" | sed 's,^[^/]*/,,'` destdir_topdir=`echo "$destdir" | sed 's,/.*,,'` mkdirective "$destdir_p1" "$base" "$file" "$stmt" echo "$passphrase" | $dbg $GPG $passphrase_fd_0 --clearsign $base.directive for f in $files $base.directive.asc do echo put $f done | $dbg sftp -b - puszcza.gnu.org.ua:/incoming/$destdir_topdir ;; /*) dest_host=`echo "$dest" | sed 's,:.*,,'` mkdirective "$destdir" "$base" "$file" "$stmt" echo "$passphrase" | $dbg $GPG $passphrase_fd_0 --clearsign $base.directive $dbg cp $files $base.directive.asc $dest_host ;; *) if test -z "$files"; then echo "$0: warning: standalone directives not applicable for $dest" >&2 fi $dbg scp $files $dest ;; esac rm -f $base.directive $base.directive.asc } ##### # Process any standalone directives stmt= if test -n "$symlink_files"; then stmt="$stmt `mksymlink $symlink_files`" fi for file in $delete_files do stmt="$stmt archive: $file" done for file in $delete_symlinks do stmt="$stmt rmsymlink: $file" done if test -n 
"$stmt"; then for dest in $to do destdir=`echo $dest | sed 's/[^:]*://'` upload "$dest" "$destdir" "`hostname`-$$" "" "$stmt" done fi # Process actual uploads for dest in $to do for file do echo "Uploading $file to $dest ..." stmt= # # allowing file replacement is all or nothing. if test -n "$replace"; then stmt="$stmt $replace" fi # files="$file $file.sig" destdir=`echo $dest | sed 's/[^:]*://'` if test -n "$symlink_expr"; then linkname=`echo $file | sed "$symlink_expr"` stmt="$stmt symlink: $file $linkname symlink: $file.sig $linkname.sig" fi upload "$dest" "$destdir" "$file" "$file" "$stmt" "$files" done done exit 0 # Local variables: # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC0" # time-stamp-end: "; # UTC" # End: �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/build-aux/gitlog-to-changelog���������������������������������������������������������0000755�0000000�0000000�00000040211�14004621310�015607� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#!/bin/sh #! -*-perl-*- # Convert git log output to ChangeLog format. # Copyright (C) 2008-2021 Free Software Foundation, Inc. 
# # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # # Written by Jim Meyering # This is a prologue that allows to run a perl script as an executable # on systems that are compliant to a POSIX version before POSIX:2017. # On such systems, the usual invocation of an executable through execlp() # or execvp() fails with ENOEXEC if it is a script that does not start # with a #! line. The script interpreter mentioned in the #! line has # to be /bin/sh, because on GuixSD systems that is the only program that # has a fixed file name. The second line is essential for perl and is # also useful for editing this file in Emacs. The next two lines below # are valid code in both sh and perl. When executed by sh, they re-execute # the script through the perl program found in $PATH. The '-x' option # is essential as well; without it, perl would re-execute the script # through /bin/sh. When executed by perl, the next two lines are a no-op. eval 'exec perl -wSx "$0" "$@"' if 0; my $VERSION = '2020-04-04 15:07'; # UTC # The definition above must lie within the first 8 lines in order # for the Emacs time-stamp write hook (at end) to update it. # If you change this file with Emacs, please let the write hook # do its job. Otherwise, update this string manually. 
use strict; use warnings; use Getopt::Long; use POSIX qw(strftime); (my $ME = $0) =~ s|.*/||; # use File::Coda; # https://meyering.net/code/Coda/ END { defined fileno STDOUT or return; close STDOUT and return; warn "$ME: failed to close standard output: $!\n"; $? ||= 1; } sub usage ($) { my ($exit_code) = @_; my $STREAM = ($exit_code == 0 ? *STDOUT : *STDERR); if ($exit_code != 0) { print $STREAM "Try '$ME --help' for more information.\n"; } else { print $STREAM <<EOF; Usage: $ME [OPTIONS] [ARGS] Convert git log output to ChangeLog format. If present, any ARGS are passed to "git log". To avoid ARGS being parsed as options to $ME, they may be preceded by '--'. OPTIONS: --amend=FILE FILE maps from an SHA1 to perl code (i.e., s/old/new/) that makes a change to SHA1's commit log text or metadata. --append-dot append a dot to the first line of each commit message if there is no other punctuation or blank at the end. --no-cluster never cluster commit messages under the same date/author header; the default is to cluster adjacent commit messages if their headers are the same and neither commit message contains multiple paragraphs. --srcdir=DIR the root of the source tree, from which the .git/ directory can be derived. --since=DATE convert only the logs since DATE; the default is to convert all log entries. --until=DATE convert only the logs older than DATE. --ignore-matching=PAT ignore commit messages whose first lines match PAT. --ignore-line=PAT ignore lines of commit messages that match PAT. --format=FMT set format string for commit subject and body; see 'man git-log' for the list of format metacharacters; the default is '%s%n%b%n' --strip-tab remove one additional leading TAB from commit message lines. --strip-cherry-pick remove data inserted by "git cherry-pick"; this includes the "cherry picked from commit ..." line, and the possible final "Conflicts:" paragraph. 
--help display this help and exit --version output version information and exit EXAMPLE: $ME --since=2008-01-01 > ChangeLog $ME -- -n 5 foo > last-5-commits-to-branch-foo SPECIAL SYNTAX: The following types of strings are interpreted specially when they appear at the beginning of a log message line. They are not copied to the output. Copyright-paperwork-exempt: Yes Append the "(tiny change)" notation to the usual "date name email" ChangeLog header to mark a change that does not require a copyright assignment. Co-authored-by: Joe User <user\@example.com> List the specified name and email address on a second ChangeLog header, denoting a co-author. Signed-off-by: Joe User <user\@example.com> These lines are simply elided. In a FILE specified via --amend, comment lines (starting with "#") are ignored. FILE must consist of <SHA,CODE+> pairs where SHA is a 40-byte SHA1 (alone on a line) referring to a commit in the current project, and CODE refers to one or more consecutive lines of Perl code. Pairs must be separated by one or more blank line. Here is sample input for use with --amend=FILE, from coreutils: 3a169f4c5d9159283548178668d2fae6fced3030 # fix typo in title: s/all tile types/all file types/ 1379ed974f1fa39b12e2ffab18b3f7a607082202 # Due to a bug in vc-dwim, I mis-attributed a patch by Paul to myself. # Change the author to be Paul. Note the escaped "@": s,Jim .*>,Paul Eggert <eggert\\\@cs.ucla.edu>, EOF } exit $exit_code; } # If the string $S is a well-behaved file name, simply return it. # If it contains white space, quotes, etc., quote it, and return the new string. sub shell_quote($) { my ($s) = @_; if ($s =~ m![^\w+/.,-]!) { # Convert each single quote to '\'' $s =~ s/\'/\'\\\'\'/g; # Then single quote the string. $s = "'$s'"; } return $s; } sub quoted_cmd(@) { return join (' ', map {shell_quote $_} @_); } # Parse file F. # Comment lines (starting with "#") are ignored. 
# F must consist of <SHA,CODE+> pairs where SHA is a 40-byte SHA1 # (alone on a line) referring to a commit in the current project, and # CODE refers to one or more consecutive lines of Perl code. # Pairs must be separated by one or more blank line. sub parse_amend_file($) { my ($f) = @_; open F, '<', $f or die "$ME: $f: failed to open for reading: $!\n"; my $fail; my $h = {}; my $in_code = 0; my $sha; while (defined (my $line = <F>)) { $line =~ /^\#/ and next; chomp $line; $line eq '' and $in_code = 0, next; if (!$in_code) { $line =~ /^([[:xdigit:]]{40})$/ or (warn "$ME: $f:$.: invalid line; expected an SHA1\n"), $fail = 1, next; $sha = lc $1; $in_code = 1; exists $h->{$sha} and (warn "$ME: $f:$.: duplicate SHA1\n"), $fail = 1, next; } else { $h->{$sha} ||= ''; $h->{$sha} .= "$line\n"; } } close F; $fail and exit 1; return $h; } # git_dir_option $SRCDIR # # From $SRCDIR, the --git-dir option to pass to git (none if $SRCDIR # is undef). Return as a list (0 or 1 element). sub git_dir_option($) { my ($srcdir) = @_; my @res = (); if (defined $srcdir) { my $qdir = shell_quote $srcdir; my $cmd = "cd $qdir && git rev-parse --show-toplevel"; my $qcmd = shell_quote $cmd; my $git_dir = qx($cmd); defined $git_dir or die "$ME: cannot run $qcmd: $!\n"; $? == 0 or die "$ME: $qcmd had unexpected exit code or signal ($?)\n"; chomp $git_dir; push @res, "--git-dir=$git_dir/.git"; } @res; } { my $since_date; my $until_date; my $format_string = '%s%n%b%n'; my $amend_file; my $append_dot = 0; my $cluster = 1; my $ignore_matching; my $ignore_line; my $strip_tab = 0; my $strip_cherry_pick = 0; my $srcdir; GetOptions ( help => sub { usage 0 }, version => sub { print "$ME version $VERSION\n"; exit }, 'since=s' => \$since_date, 'until=s' => \$until_date, 'format=s' => \$format_string, 'amend=s' => \$amend_file, 'append-dot' => \$append_dot, 'cluster!' 
=> \$cluster, 'ignore-matching=s' => \$ignore_matching, 'ignore-line=s' => \$ignore_line, 'strip-tab' => \$strip_tab, 'strip-cherry-pick' => \$strip_cherry_pick, 'srcdir=s' => \$srcdir, ) or usage 1; defined $since_date and unshift @ARGV, "--since=$since_date"; defined $until_date and unshift @ARGV, "--until=$until_date"; # This is a hash that maps an SHA1 to perl code (i.e., s/old/new/) # that makes a correction in the log or attribution of that commit. my $amend_code = defined $amend_file ? parse_amend_file $amend_file : {}; my @cmd = ('git', git_dir_option $srcdir, qw(log --log-size), '--pretty=format:%H:%ct %an <%ae>%n%n'.$format_string, @ARGV); open PIPE, '-|', @cmd or die ("$ME: failed to run '". quoted_cmd (@cmd) ."': $!\n" . "(Is your Git too old? Version 1.5.1 or later is required.)\n"); my $prev_multi_paragraph; my $prev_date_line = ''; my @prev_coauthors = (); my @skipshas = (); while (1) { defined (my $in = <PIPE>) or last; $in =~ /^log size (\d+)$/ or die "$ME:$.: Invalid line (expected log size):\n$in"; my $log_nbytes = $1; my $log; my $n_read = read PIPE, $log, $log_nbytes; $n_read == $log_nbytes or die "$ME:$.: unexpected EOF\n"; # Extract leading hash. my ($sha, $rest) = split ':', $log, 2; defined $sha or die "$ME:$.: malformed log entry\n"; $sha =~ /^[[:xdigit:]]{40}$/ or die "$ME:$.: invalid SHA1: $sha\n"; my $skipflag = 0; if (@skipshas) { foreach(@skipshas) { if ($sha =~ /^$_/) { $skipflag = $_; last; } } } # If this commit's log requires any transformation, do it now. my $code = $amend_code->{$sha}; if (defined $code) { eval 'use Safe'; my $s = new Safe; # Put the unpreprocessed entry into "$_". $_ = $rest; # Let $code operate on it, safely. my $r = $s->reval("$code") or die "$ME:$.:$sha: failed to eval \"$code\":\n$@\n"; # Note that we've used this entry. delete $amend_code->{$sha}; # Update $rest upon success. $rest = $_; } # Remove lines inserted by "git cherry-pick". 
if ($strip_cherry_pick) { $rest =~ s/^\s*Conflicts:\n.*//sm; $rest =~ s/^\s*\(cherry picked from commit [\da-f]+\)\n//m; } my @line = split /[ \t]*\n/, $rest; my $author_line = shift @line; defined $author_line or die "$ME:$.: unexpected EOF\n"; $author_line =~ /^(\d+) (.*>)$/ or die "$ME:$.: Invalid line " . "(expected date/author/email):\n$author_line\n"; # Format 'Copyright-paperwork-exempt: Yes' as a standard ChangeLog # `(tiny change)' annotation. my $tiny = (grep (/^(?:Copyright-paperwork-exempt|Tiny-change):\s+[Yy]es$/, @line) ? ' (tiny change)' : ''); my $date_line = sprintf "%s %s$tiny\n", strftime ("%Y-%m-%d", localtime ($1)), $2; my @coauthors = grep /^Co-authored-by:.*$/, @line; # Omit meta-data lines we've already interpreted. @line = grep !/^(?:Signed-off-by:[ ].*>$ |Co-authored-by:[ ] |Copyright-paperwork-exempt:[ ] |Tiny-change:[ ] )/x, @line; # Remove leading and trailing blank lines. if (@line) { while ($line[0] =~ /^\s*$/) { shift @line; } while ($line[$#line] =~ /^\s*$/) { pop @line; } } # Handle Emacs gitmerge.el "skipped" commits. # Yes, this should be controlled by an option. So sue me. if ( grep /^(; )?Merge from /, @line ) { my $found = 0; foreach (@line) { if (grep /^The following commit.*skipped:$/, $_) { $found = 1; ## Reset at each merge to reduce chance of false matches. @skipshas = (); next; } if ($found && $_ =~ /^([[:xdigit:]]{7,}) [^ ]/) { push ( @skipshas, $1 ); } } } # Ignore commits that match the --ignore-matching pattern, if specified. if (defined $ignore_matching && @line && $line[0] =~ /$ignore_matching/) { $skipflag = 1; } elsif ($skipflag) { ## Perhaps only warn if a pattern matches more than once? warn "$ME: warning: skipping $sha due to $skipflag\n"; } if (! $skipflag) { if (defined $ignore_line && @line) { @line = grep ! /$ignore_line/, @line; while ($line[$#line] =~ /^\s*$/) { pop @line; } } # Record whether there are two or more paragraphs. 
my $multi_paragraph = grep /^\s*$/, @line; # Format 'Co-authored-by: A U Thor <email@example.com>' lines in # standard multi-author ChangeLog format. for (@coauthors) { s/^Co-authored-by:\s*/\t /; s/\s*</ </; /<.*?@.*\..*>/ or warn "$ME: warning: missing email address for " . substr ($_, 5) . "\n"; } # If clustering of commit messages has been disabled, if this header # would be different from the previous date/name/etc. header, # or if this or the previous entry consists of two or more paragraphs, # then print the header. if ( ! $cluster || $date_line ne $prev_date_line || "@coauthors" ne "@prev_coauthors" || $multi_paragraph || $prev_multi_paragraph) { $prev_date_line eq '' or print "\n"; print $date_line; @coauthors and print join ("\n", @coauthors), "\n"; } $prev_date_line = $date_line; @prev_coauthors = @coauthors; $prev_multi_paragraph = $multi_paragraph; # If there were any lines if (@line == 0) { warn "$ME: warning: empty commit message:\n $date_line\n"; } else { if ($append_dot) { # If the first line of the message has enough room, then if (length $line[0] < 72) { # append a dot if there is no other punctuation or blank # at the end. $line[0] =~ /[[:punct:]\s]$/ or $line[0] .= '.'; } } # Remove one additional leading TAB from each line. $strip_tab and map { s/^\t// } @line; # Prefix each non-empty line with a TAB. @line = map { length $_ ? "\t$_" : '' } @line; print "\n", join ("\n", @line), "\n"; } } defined ($in = <PIPE>) or last; $in ne "\n" and die "$ME:$.: unexpected line:\n$in"; } close PIPE or die "$ME: error closing pipe from " . quoted_cmd (@cmd) . "\n"; # FIXME-someday: include $PROCESS_STATUS in the diagnostic # Complain about any unused entry in the --amend=F specified file. 
my $fail = 0; foreach my $sha (keys %$amend_code) { warn "$ME:$amend_file: unused entry: $sha\n"; $fail = 1; } exit $fail; } # Local Variables: # mode: perl # indent-tabs-mode: nil # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-line-limit: 50 # time-stamp-start: "my $VERSION = '" # time-stamp-format: "%:y-%02m-%02d %02H:%02M" # time-stamp-time-zone: "UTC0" # time-stamp-end: "'; # UTC" # End: ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/build-aux/help-extract.pl�������������������������������������������������������������0000644�0000000�0000000�00000017131�14004621270�014776� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# help-extract -- extract --help and --version output from a script. # Copyright (C) 2020-2021 Free Software Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # Written by Zack Weinberg. use strict; use warnings; # File::Spec itself was added in 5.005. # File::Spec::Functions was added in 5.6.1 which is just barely too new. use File::Spec; # This script is not intended to be used directly. It's run by # help2man via wrappers in man/, e.g. man/autoconf.w, as if it were # one of autoconf's executable scripts. It extracts the --help and # --version output of that script from its source form, without # actually running it. The script to work from is set by the wrapper, # and several other parameters are passed down from the Makefile as # environment variables; see parse_args below. # The point of this script is, the preprocessed forms of the # executable scripts, and their wrappers for uninstalled use # (e.g. <build-dir>/{bin,tests}/autoconf) do not need to exist to # generate the corresponding manpages. This is desirable because we # can't put those dependencies in the makefiles without breaking # people's ability to build autoconf from a release tarball without # help2man installed. It also ensures that we will generate manpages # from the current source code and not from an older version of the # script that has already been installed. ## ----------------------------- ## ## Extraction from Perl scripts. ## ## ----------------------------- ## sub eval_qq_no_interpolation ($) { # The argument is expected to be a "double quoted string" including the # leading and trailing delimiters. Returns the text of this string after # processing backslash escapes but NOT interpolation. # / (?<!\\) (?>\\\\)* blah /x means match blah preceded by an # *even* number of backslashes. It would be nice if we could use \K # to exclude the backslashes from the matched text, but that was only # added in Perl 5.10 and we still support back to 5.006. 
return eval $_[0] =~ s/ (?<!\\) (?>\\\\)* [\$\@] /\\$&/xrg; } sub extract_channeldefs_usage ($) { my ($channeldefs_pm) = @_; my $usage = ""; my $parse_state = 0; local $_; open (my $fh, "<", $channeldefs_pm) or die "$channeldefs_pm: $!\n"; while (<$fh>) { if ($parse_state == 0) { $parse_state = 1 if /^sub usage\b/; } elsif ($parse_state == 1) { if (s/^ return "//) { $parse_state = 2; $usage .= $_; } } elsif ($parse_state == 2) { if (s/(?<!\\) ((?>\\\\)*) "; $/$1/x) { $usage .= $_; return $usage; } else { $usage .= $_; } } } die "$channeldefs_pm: unexpected EOF in state $parse_state\n"; } sub extract_perl_assignment (*$$$) { my ($fh, $source, $channeldefs_pm, $what) = @_; my $value = ""; my $parse_state = 0; local $_; while (<$fh>) { if ($parse_state == 0) { if (s/^\$\Q${what}\E = (?=")//o) { $value .= $_; $parse_state = 1; } } elsif ($parse_state == 1) { if (/^"\s*\.\s*Autom4te::ChannelDefs::usage\s*(?:\(\))?\s*\.\s*"$/) { $value .= extract_channeldefs_usage ($channeldefs_pm); } elsif (/^";$/) { $value .= '"'; return eval_qq_no_interpolation ($value); } else { $value .= $_; } } } die "$source: unexpected EOF in state $parse_state\n"; } ## ------------------------------ ## ## Extraction from shell scripts. ## ## ------------------------------ ## sub extract_shell_assignment (*$$) { my ($fh, $source, $what) = @_; my $value = ""; my $parse_state = 0; local $_; while (<$fh>) { if ($parse_state == 0) { if (/^\Q${what}\E=\[\"\\$/) { $parse_state = 1; } } elsif ($parse_state == 1) { my $done = s/"\]$//; $value .= $_; if ($done) { # This is not strictly correct but it works acceptably # for the escapes that actually occur in the strings # we're extracting. return eval_qq_no_interpolation ('"'.$value.'"'); } } } die "$source: unexpected EOF in state $parse_state\n"; } ## -------------- ## ## Main program. 
## ## -------------- ## sub extract_assignment ($$$) { my ($source, $channeldefs_pm, $what) = @_; open (my $fh, "<", $source) or die "$source: $!\n"; my $firstline = <$fh>; if ($firstline =~ /\@PERL\@/ || $firstline =~ /-\*-\s*perl\s*-\*-/i) { return extract_perl_assignment ($fh, $source, $channeldefs_pm, $what); } elsif ($firstline =~ /\bAS_INIT\b/ || $firstline =~ /bin\/[a-z0-9]*sh\b/ || $firstline =~ /-\*-\s*shell-script\s*-\*-/i) { return extract_shell_assignment ($fh, $source, $what); } else { die "$source: language not recognized\n"; } } sub main () { # Most of our arguments come from environment variables, because # help2man doesn't allow for passing additional command line # arguments to the wrappers, and it's easier to write the wrappers # to not mess with the command line. my $usage = "Usage: $0 script-source (--help | --version) Extract help and version information from a perl or shell script. Required environment variables: top_srcdir relative path from cwd to the top of the source tree channeldefs_pm relative path from top_srcdir to ChannelDefs.pm PACKAGE_NAME the autoconf PACKAGE_NAME substitution variable VERSION the autoconf VERSION substitution variable RELEASE_YEAR the autoconf RELEASE_YEAR substitution variable The script-source argument should also be relative to top_srcdir. 
"; my $source = shift(@ARGV) || die $usage; my $what = shift(@ARGV) || die $usage; my $top_srcdir = $ENV{top_srcdir} || die $usage; my $channeldefs_pm = $ENV{channeldefs_pm} || die $usage; my $package_name = $ENV{PACKAGE_NAME} || die $usage; my $version = $ENV{VERSION} || die $usage; my $release_year = $ENV{RELEASE_YEAR} || die $usage; if ($what eq "-h" || $what eq "--help") { $what = "help"; } elsif ($what eq "-V" || $what eq "--version") { $what = "version"; } else { die $usage; } my $cmd_name = $source =~ s{^.*/([^./]+)\.(?:as|in)$}{$1}r; $source = File::Spec->catfile($top_srcdir, $source); $channeldefs_pm = File::Spec->catfile($top_srcdir, $channeldefs_pm); my $text = extract_assignment ($source, $channeldefs_pm, $what); $text =~ s/\$0\b/$cmd_name/g; $text =~ s/[@]PACKAGE_NAME@/$package_name/g; $text =~ s/[@]VERSION@/$version/g; $text =~ s/[@]RELEASE_YEAR@/$release_year/g; print $text; } main; ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/����������������������������������������������������������������������������������0000755�0000000�0000000�00000000000�14004625653�011002� 
5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/version.texi����������������������������������������������������������������������0000644�0000000�0000000�00000000141�14004623447�013275� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@set UPDATED 28 January 2021 @set UPDATED-MONTH January 2021 @set EDITION 2.71 @set VERSION 2.71 �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/stamp-vti�������������������������������������������������������������������������0000644�0000000�0000000�00000000141�14004623510�012553� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@set UPDATED 28 January 2021 @set UPDATED-MONTH January 2021 @set EDITION 2.71 @set VERSION 2.71 
�������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/fdl.texi��������������������������������������������������������������������������0000644�0000000�0000000�00000055612�13765663120�012377� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@c The GNU Free Documentation License. @center Version 1.3, 3 November 2008 @c This file is intended to be included within another document, @c hence no sectioning command or @node. @display Copyright @copyright{} 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. @uref{https://fsf.org/} Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @end display @enumerate 0 @item PREAMBLE The purpose of this License is to make a manual, textbook, or other functional and useful document @dfn{free} in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others. This License is a kind of ``copyleft'', which means that derivative works of the document must themselves be free in the same sense. 
It complements the GNU General Public License, which is a copyleft license designed for free software. We have designed this License in order to use it for manuals for free software, because free software needs free documentation: a free program should come with manuals providing the same freedoms that the software does. But this License is not limited to software manuals; it can be used for any textual work, regardless of subject matter or whether it is published as a printed book. We recommend this License principally for works whose purpose is instruction or reference. @item APPLICABILITY AND DEFINITIONS This License applies to any manual or other work, in any medium, that contains a notice placed by the copyright holder saying it can be distributed under the terms of this License. Such a notice grants a world-wide, royalty-free license, unlimited in duration, to use that work under the conditions stated herein. The ``Document'', below, refers to any such manual or work. Any member of the public is a licensee, and is addressed as ``you''. You accept the license if you copy, modify or distribute the work in a way requiring permission under copyright law. A ``Modified Version'' of the Document means any work containing the Document or a portion of it, either copied verbatim, or with modifications and/or translated into another language. A ``Secondary Section'' is a named appendix or a front-matter section of the Document that deals exclusively with the relationship of the publishers or authors of the Document to the Document's overall subject (or to related matters) and contains nothing that could fall directly within that overall subject. (Thus, if the Document is in part a textbook of mathematics, a Secondary Section may not explain any mathematics.) The relationship could be a matter of historical connection with the subject or with related matters, or of legal, commercial, philosophical, ethical or political position regarding them. 
The ``Invariant Sections'' are certain Secondary Sections whose titles are designated, as being those of Invariant Sections, in the notice that says that the Document is released under this License. If a section does not fit the above definition of Secondary then it is not allowed to be designated as Invariant. The Document may contain zero Invariant Sections. If the Document does not identify any Invariant Sections then there are none. The ``Cover Texts'' are certain short passages of text that are listed, as Front-Cover Texts or Back-Cover Texts, in the notice that says that the Document is released under this License. A Front-Cover Text may be at most 5 words, and a Back-Cover Text may be at most 25 words. A ``Transparent'' copy of the Document means a machine-readable copy, represented in a format whose specification is available to the general public, that is suitable for revising the document straightforwardly with generic text editors or (for images composed of pixels) generic paint programs or (for drawings) some widely available drawing editor, and that is suitable for input to text formatters or for automatic translation to a variety of formats suitable for input to text formatters. A copy made in an otherwise Transparent file format whose markup, or absence of markup, has been arranged to thwart or discourage subsequent modification by readers is not Transparent. An image format is not Transparent if used for any substantial amount of text. A copy that is not ``Transparent'' is called ``Opaque''. Examples of suitable formats for Transparent copies include plain ASCII without markup, Texinfo input format, La@TeX{} input format, SGML or XML using a publicly available DTD, and standard-conforming simple HTML, PostScript or PDF designed for human modification. Examples of transparent image formats include PNG, XCF and JPG@. 
Opaque formats include proprietary formats that can be read and edited only by proprietary word processors, SGML or XML for which the DTD and/or processing tools are not generally available, and the machine-generated HTML, PostScript or PDF produced by some word processors for output purposes only. The ``Title Page'' means, for a printed book, the title page itself, plus such following pages as are needed to hold, legibly, the material this License requires to appear in the title page. For works in formats which do not have any title page as such, ``Title Page'' means the text near the most prominent appearance of the work's title, preceding the beginning of the body of the text. The ``publisher'' means any person or entity that distributes copies of the Document to the public. A section ``Entitled XYZ'' means a named subunit of the Document whose title either is precisely XYZ or contains XYZ in parentheses following text that translates XYZ in another language. (Here XYZ stands for a specific section name mentioned below, such as ``Acknowledgements'', ``Dedications'', ``Endorsements'', or ``History''.) To ``Preserve the Title'' of such a section when you modify the Document means that it remains a section ``Entitled XYZ'' according to this definition. The Document may include Warranty Disclaimers next to the notice which states that this License applies to the Document. These Warranty Disclaimers are considered to be included by reference in this License, but only as regards disclaiming warranties: any other implication that these Warranty Disclaimers may have is void and has no effect on the meaning of this License. @item VERBATIM COPYING You may copy and distribute the Document in any medium, either commercially or noncommercially, provided that this License, the copyright notices, and the license notice saying this License applies to the Document are reproduced in all copies, and that you add no other conditions whatsoever to those of this License. 
You may not use technical measures to obstruct or control the reading or further copying of the copies you make or distribute. However, you may accept compensation in exchange for copies. If you distribute a large enough number of copies you must also follow the conditions in section 3. You may also lend copies, under the same conditions stated above, and you may publicly display copies. @item COPYING IN QUANTITY If you publish printed copies (or copies in media that commonly have printed covers) of the Document, numbering more than 100, and the Document's license notice requires Cover Texts, you must enclose the copies in covers that carry, clearly and legibly, all these Cover Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on the back cover. Both covers must also clearly and legibly identify you as the publisher of these copies. The front cover must present the full title with all words of the title equally prominent and visible. You may add other material on the covers in addition. Copying with changes limited to the covers, as long as they preserve the title of the Document and satisfy these conditions, can be treated as verbatim copying in other respects. If the required texts for either cover are too voluminous to fit legibly, you should put the first ones listed (as many as fit reasonably) on the actual cover, and continue the rest onto adjacent pages. If you publish or distribute Opaque copies of the Document numbering more than 100, you must either include a machine-readable Transparent copy along with each Opaque copy, or state in or with each Opaque copy a computer-network location from which the general network-using public has access to download using public-standard network protocols a complete Transparent copy of the Document, free of added material. 
If you use the latter option, you must take reasonably prudent steps, when you begin distribution of Opaque copies in quantity, to ensure that this Transparent copy will remain thus accessible at the stated location until at least one year after the last time you distribute an Opaque copy (directly or through your agents or retailers) of that edition to the public. It is requested, but not required, that you contact the authors of the Document well before redistributing any large number of copies, to give them a chance to provide you with an updated version of the Document. @item MODIFICATIONS You may copy and distribute a Modified Version of the Document under the conditions of sections 2 and 3 above, provided that you release the Modified Version under precisely this License, with the Modified Version filling the role of the Document, thus licensing distribution and modification of the Modified Version to whoever possesses a copy of it. In addition, you must do these things in the Modified Version: @enumerate A @item Use in the Title Page (and on the covers, if any) a title distinct from that of the Document, and from those of previous versions (which should, if there were any, be listed in the History section of the Document). You may use the same title as a previous version if the original publisher of that version gives permission. @item List on the Title Page, as authors, one or more persons or entities responsible for authorship of the modifications in the Modified Version, together with at least five of the principal authors of the Document (all of its principal authors, if it has fewer than five), unless they release you from this requirement. @item State on the Title page the name of the publisher of the Modified Version, as the publisher. @item Preserve all the copyright notices of the Document. @item Add an appropriate copyright notice for your modifications adjacent to the other copyright notices. 
@item Include, immediately after the copyright notices, a license notice giving the public permission to use the Modified Version under the terms of this License, in the form shown in the Addendum below. @item Preserve in that license notice the full lists of Invariant Sections and required Cover Texts given in the Document's license notice. @item Include an unaltered copy of this License. @item Preserve the section Entitled ``History'', Preserve its Title, and add to it an item stating at least the title, year, new authors, and publisher of the Modified Version as given on the Title Page. If there is no section Entitled ``History'' in the Document, create one stating the title, year, authors, and publisher of the Document as given on its Title Page, then add an item describing the Modified Version as stated in the previous sentence. @item Preserve the network location, if any, given in the Document for public access to a Transparent copy of the Document, and likewise the network locations given in the Document for previous versions it was based on. These may be placed in the ``History'' section. You may omit a network location for a work that was published at least four years before the Document itself, or if the original publisher of the version it refers to gives permission. @item For any section Entitled ``Acknowledgements'' or ``Dedications'', Preserve the Title of the section, and preserve in the section all the substance and tone of each of the contributor acknowledgements and/or dedications given therein. @item Preserve all the Invariant Sections of the Document, unaltered in their text and in their titles. Section numbers or the equivalent are not considered part of the section titles. @item Delete any section Entitled ``Endorsements''. Such a section may not be included in the Modified Version. @item Do not retitle any existing section to be Entitled ``Endorsements'' or to conflict in title with any Invariant Section. 
@item Preserve any Warranty Disclaimers. @end enumerate If the Modified Version includes new front-matter sections or appendices that qualify as Secondary Sections and contain no material copied from the Document, you may at your option designate some or all of these sections as invariant. To do this, add their titles to the list of Invariant Sections in the Modified Version's license notice. These titles must be distinct from any other section titles. You may add a section Entitled ``Endorsements'', provided it contains nothing but endorsements of your Modified Version by various parties---for example, statements of peer review or that the text has been approved by an organization as the authoritative definition of a standard. You may add a passage of up to five words as a Front-Cover Text, and a passage of up to 25 words as a Back-Cover Text, to the end of the list of Cover Texts in the Modified Version. Only one passage of Front-Cover Text and one of Back-Cover Text may be added by (or through arrangements made by) any one entity. If the Document already includes a cover text for the same cover, previously added by you or by arrangement made by the same entity you are acting on behalf of, you may not add another; but you may replace the old one, on explicit permission from the previous publisher that added the old one. The author(s) and publisher(s) of the Document do not by this License give permission to use their names for publicity for or to assert or imply endorsement of any Modified Version. @item COMBINING DOCUMENTS You may combine the Document with other documents released under this License, under the terms defined in section 4 above for modified versions, provided that you include in the combination all of the Invariant Sections of all of the original documents, unmodified, and list them all as Invariant Sections of your combined work in its license notice, and that you preserve all their Warranty Disclaimers. 
The combined work need only contain one copy of this License, and multiple identical Invariant Sections may be replaced with a single copy. If there are multiple Invariant Sections with the same name but different contents, make the title of each such section unique by adding at the end of it, in parentheses, the name of the original author or publisher of that section if known, or else a unique number. Make the same adjustment to the section titles in the list of Invariant Sections in the license notice of the combined work. In the combination, you must combine any sections Entitled ``History'' in the various original documents, forming one section Entitled ``History''; likewise combine any sections Entitled ``Acknowledgements'', and any sections Entitled ``Dedications''. You must delete all sections Entitled ``Endorsements.'' @item COLLECTIONS OF DOCUMENTS You may make a collection consisting of the Document and other documents released under this License, and replace the individual copies of this License in the various documents with a single copy that is included in the collection, provided that you follow the rules of this License for verbatim copying of each of the documents in all other respects. You may extract a single document from such a collection, and distribute it individually under this License, provided you insert a copy of this License into the extracted document, and follow this License in all other respects regarding verbatim copying of that document. @item AGGREGATION WITH INDEPENDENT WORKS A compilation of the Document or its derivatives with other separate and independent documents or works, in or on a volume of a storage or distribution medium, is called an ``aggregate'' if the copyright resulting from the compilation is not used to limit the legal rights of the compilation's users beyond what the individual works permit. 
When the Document is included in an aggregate, this License does not apply to the other works in the aggregate which are not themselves derivative works of the Document. If the Cover Text requirement of section 3 is applicable to these copies of the Document, then if the Document is less than one half of the entire aggregate, the Document's Cover Texts may be placed on covers that bracket the Document within the aggregate, or the electronic equivalent of covers if the Document is in electronic form. Otherwise they must appear on printed covers that bracket the whole aggregate. @item TRANSLATION Translation is considered a kind of modification, so you may distribute translations of the Document under the terms of section 4. Replacing Invariant Sections with translations requires special permission from their copyright holders, but you may include translations of some or all Invariant Sections in addition to the original versions of these Invariant Sections. You may include a translation of this License, and all the license notices in the Document, and any Warranty Disclaimers, provided that you also include the original English version of this License and the original versions of those notices and disclaimers. In case of a disagreement between the translation and the original version of this License or a notice or disclaimer, the original version will prevail. If a section in the Document is Entitled ``Acknowledgements'', ``Dedications'', or ``History'', the requirement (section 4) to Preserve its Title (section 1) will typically require changing the actual title. @item TERMINATION You may not copy, modify, sublicense, or distribute the Document except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, or distribute it is void, and will automatically terminate your rights under this License. 
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, receipt of a copy of some or all of the same material does not give you any rights to use it. @item FUTURE REVISIONS OF THIS LICENSE The Free Software Foundation may publish new, revised versions of the GNU Free Documentation License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. See @uref{https://www.gnu.org/licenses/}. Each version of the License is given a distinguishing version number. If the Document specifies that a particular numbered version of this License ``or any later version'' applies to it, you have the option of following the terms and conditions either of that specified version or of any later version that has been published (not as a draft) by the Free Software Foundation. If the Document does not specify a version number of this License, you may choose any version ever published (not as a draft) by the Free Software Foundation. 
If the Document specifies that a proxy can decide which future versions of this License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Document. @item RELICENSING ``Massive Multiauthor Collaboration Site'' (or ``MMC Site'') means any World Wide Web server that publishes copyrightable works and also provides prominent facilities for anybody to edit those works. A public wiki that anybody can edit is an example of such a server. A ``Massive Multiauthor Collaboration'' (or ``MMC'') contained in the site means any set of copyrightable works thus published on the MMC site. ``CC-BY-SA'' means the Creative Commons Attribution-Share Alike 3.0 license published by Creative Commons Corporation, a not-for-profit corporation with a principal place of business in San Francisco, California, as well as future copyleft versions of that license published by that same organization. ``Incorporate'' means to publish or republish a Document, in whole or in part, as part of another Document. An MMC is ``eligible for relicensing'' if it is licensed under this License, and if all works that were first published under this License somewhere other than this MMC, and subsequently incorporated in whole or in part into the MMC, (1) had no cover texts or invariant sections, and (2) were thus incorporated prior to November 1, 2008. The operator of an MMC Site may republish an MMC contained in the site under CC-BY-SA on the same site at any time before August 1, 2009, provided the MMC is eligible for relicensing. @end enumerate @page @heading ADDENDUM: How to use this License for your documents To use this License in a document you have written, include a copy of the License in the document and put the following copyright and license notices just after the title page: @smallexample @group Copyright (C) @var{year} @var{your name}. 
Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License''. @end group @end smallexample If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, replace the ``with@dots{}Texts.''@: line with this: @smallexample @group with the Invariant Sections being @var{list their titles}, with the Front-Cover Texts being @var{list}, and with the Back-Cover Texts being @var{list}. @end group @end smallexample If you have Invariant Sections without Cover Texts, or some other combination of the three, merge those two alternatives to suit the situation. If your document contains nontrivial examples of program code, we recommend releasing these examples in parallel under your choice of free software license, such as the GNU General Public License, to permit their use in free software. @c Local Variables: @c ispell-local-pdict: "ispell-dict" @c End: ����������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/install.texi����������������������������������������������������������������������0000644�0000000�0000000�00000041770�14004621270�013263� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@c This file is included by autoconf.texi and is used to produce @c the INSTALL file. 
@ifclear autoconf @unnumbered Installation Instructions Copyright @copyright{} 1994--1996, 1999--2002, 2004--2017, 2020--2021 Free Software Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without warranty of any kind. @end ifclear @node Basic Installation @section Basic Installation Briefly, the shell command @samp{./configure@tie{}&& make@tie{}&& make@tie{}install} should configure, build, and install this package. The following more-detailed instructions are generic; see the @file{README} file for instructions specific to this package. @ifclear autoconf Some packages provide this @file{INSTALL} file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. @end ifclear More recommendations for GNU packages can be found in @ref{Makefile Conventions, , Makefile Conventions, standards, GNU Coding Standards}. The @command{configure} shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a @file{Makefile} in each directory of the package. It may also create one or more @file{.h} files containing system-dependent definitions. Finally, it creates a shell script @file{config.status} that you can run in the future to recreate the current configuration, and a file @file{config.log} containing compiler output (useful mainly for debugging @command{configure}). It can also use an optional file (typically called @file{config.cache} and enabled with @option{--cache-file=config.cache} or simply @option{-C}) that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files. 
If you need to do unusual things to compile the package, please try to figure out how @command{configure} could check whether to do them, and mail diffs or instructions to the address given in the @file{README} so they can be considered for the next release. If you are using the cache, and at some point @file{config.cache} contains results you don't want to keep, you may remove or edit it. The file @file{configure.ac} (or @file{configure.in}) is used to create @file{configure} by a program called @command{autoconf}. You need @file{configure.ac} if you want to change it or regenerate @file{configure} using a newer version of @command{autoconf}. The simplest way to compile this package is: @enumerate @item @command{cd} to the directory containing the package's source code and type @samp{./configure} to configure the package for your system. Running @command{configure} might take a while. While running, it prints some messages telling which features it is checking for. @item Type @samp{make} to compile the package. @item Optionally, type @samp{make check} to run any self-tests that come with the package, generally using the just-built uninstalled binaries. @item Type @samp{make install} to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the @samp{make install} phase executed with root privileges. @item Optionally, type @samp{make installcheck} to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a regular user, particularly if the prior @samp{make install} required root privileges, verifies that the installation completed correctly. @item You can remove the program binaries and object files from the source code directory by typing @samp{make clean}. 
To also remove the files that @command{configure} created (so you can compile the package for a different kind of computer), type @samp{make distclean}. There is also a @samp{make maintainer-clean} target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. @item Often, you can also type @samp{make uninstall} to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. @item Some packages, particularly those that use Automake, provide @samp{make distcheck}, which can be used by developers to test that all other targets like @samp{make install} and @samp{make uninstall} work correctly. This target is generally not run by end users. @end enumerate @node Compilers and Options @section Compilers and Options Some systems require unusual options for compilation or linking that the @command{configure} script does not know about. Run @samp{./configure --help} for details on some of the pertinent environment variables. You can give @command{configure} initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: @example ./configure CC=c99 CFLAGS=-g LIBS=-lposix @end example @xref{Defining Variables}, for more details. @node Multiple Architectures @section Compiling For Multiple Architectures You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you can use GNU @command{make}. @command{cd} to the directory where you want the object files and executables to go and run the @command{configure} script. @command{configure} automatically checks for the source code in the directory that @command{configure} is in and in @file{..}. This is known as a @dfn{VPATH} build.
With a non-GNU @command{make}, it is safer to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use @samp{make distclean} before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types---known as @dfn{fat} or @dfn{universal} binaries---by specifying multiple @option{-arch} options to the compiler but only a single @option{-arch} option to the preprocessor. Like this: @example ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CPP="gcc -E" CXXCPP="g++ -E" @end example This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results using the @command{lipo} tool if you have problems. @node Installation Names @section Installation Names By default, @samp{make install} installs the package's commands under @file{/usr/local/bin}, include files under @file{/usr/local/include}, etc. You can specify an installation prefix other than @file{/usr/local} by giving @command{configure} the option @option{--prefix=@var{prefix}}, where @var{prefix} must be an absolute file name. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you pass the option @option{--exec-prefix=@var{prefix}} to @command{configure}, the package uses @var{prefix} as the prefix for installing programs and libraries. Documentation and other data files still use the regular prefix. In addition, if you use an unusual directory layout you can give options like @option{--bindir=@var{dir}} to specify different values for particular kinds of files. Run @samp{configure --help} for a list of the directories you can set and what kinds of files go in them. 
In general, the default for these options is expressed in terms of @samp{$@{prefix@}}, so that specifying just @option{--prefix} will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the correct locations to @command{configure}; however, many packages provide one or both of the following shortcuts of passing variable assignments to the @samp{make install} command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each affected directory. For example, @samp{make install prefix=/alternate/directory} will choose an alternate location for all directory configuration variables that were expressed in terms of @samp{$@{prefix@}}. Any directories that were specified during @command{configure}, but not in terms of @samp{$@{prefix@}}, must each be overridden at install time for the entire installation to be relocated. The approach of makefile variable overrides for each directory variable is required by the GNU Coding Standards, and ideally causes no recompilation. However, some platforms have known limitations with the semantics of shared libraries that end up requiring recompilation when using this method, particularly noticeable in packages that use GNU Libtool. The second method involves providing the @samp{DESTDIR} variable. For example, @samp{make install DESTDIR=/alternate/directory} will prepend @samp{/alternate/directory} before all installation names. The approach of @samp{DESTDIR} overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. On the other hand, it does better at avoiding recompilation issues, and works well even when some directory options were not specified in terms of @samp{$@{prefix@}} at @command{configure} time. 
@node Optional Features @section Optional Features If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving @command{configure} the option @option{--program-prefix=@var{PREFIX}} or @option{--program-suffix=@var{SUFFIX}}. Some packages pay attention to @option{--enable-@var{feature}} options to @command{configure}, where @var{feature} indicates an optional part of the package. They may also pay attention to @option{--with-@var{package}} options, where @var{package} is something like @samp{gnu-as} or @samp{x} (for the X Window System). The @file{README} should mention any @option{--enable-} and @option{--with-} options that the package recognizes. For packages that use the X Window System, @command{configure} can usually find the X include and library files automatically, but if it doesn't, you can use the @command{configure} options @option{--x-includes=@var{dir}} and @option{--x-libraries=@var{dir}} to specify their locations. Some packages offer the ability to configure how verbose the execution of @command{make} will be. For these packages, running @samp{./configure --enable-silent-rules} sets the default to minimal output, which can be overridden with @code{make V=1}; while running @samp{./configure --disable-silent-rules} sets the default to verbose, which can be overridden with @code{make V=0}. @node Particular Systems @section Particular systems On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC is not installed, it is recommended to use the following options in order to use an ANSI C compiler: @example ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" @end example @noindent and if that doesn't work, install pre-built binaries of GCC for HP-UX. HP-UX @command{make} updates targets which have the same timestamps as their prerequisites, which makes it generally unusable when shipped generated files such as @command{configure} are involved. Use GNU @command{make} instead. 
On OSF/1 a.k.a.@: Tru64, some versions of the default C compiler cannot parse its @code{<wchar.h>} header file. The option @option{-nodtk} can be used as a workaround. If GNU CC is not installed, it is therefore recommended to try @example ./configure CC="cc" @end example @noindent and if that doesn't work, try @example ./configure CC="cc -nodtk" @end example On Solaris, don't put @code{/usr/ucb} early in your @env{PATH}. This directory contains several dysfunctional programs; working variants of these programs are available in @code{/usr/bin}. So, if you need @code{/usr/ucb} in your @env{PATH}, put it @emph{after} @code{/usr/bin}. On Haiku, software installed for all users goes in @file{/boot/common}, not @file{/usr/local}. It is recommended to use the following options: @example ./configure --prefix=/boot/common @end example @node System Type @section Specifying the System Type There may be some features @command{configure} cannot figure out automatically, but needs to determine by the type of machine the package will run on. Usually, assuming the package is built to be run on the @emph{same} architectures, @command{configure} can figure that out, but if it prints a message saying it cannot guess the machine type, give it the @option{--build=@var{type}} option. @var{type} can either be a short name for the system type, such as @samp{sun4}, or a canonical name which has the form: @example @var{cpu}-@var{company}-@var{system} @end example @noindent where @var{system} can have one of these forms: @example @var{os} @var{kernel}-@var{os} @end example See the file @file{config.sub} for the possible values of each field. If @file{config.sub} isn't included in this package, then this package doesn't need to know the machine type. If you are @emph{building} compiler tools for cross-compiling, you should use the option @option{--target=@var{type}} to select the type of system they will produce code for. 
If you want to @emph{use} a cross compiler, that generates code for a platform different from the build platform, you should specify the @dfn{host} platform (i.e., that on which the generated programs will eventually be run) with @option{--host=@var{type}}. @node Sharing Defaults @section Sharing Defaults If you want to set default values for @command{configure} scripts to share, you can create a site shell script called @file{config.site} that gives default values for variables like @code{CC}, @code{cache_file}, and @code{prefix}. @command{configure} looks for @file{@var{prefix}/share/config.site} if it exists, then @file{@var{prefix}/etc/config.site} if it exists. Or, you can set the @code{CONFIG_SITE} environment variable to the location of the site script. A warning: not all @command{configure} scripts look for a site script. @node Defining Variables @section Defining Variables Variables not defined in a site shell script can be set in the environment passed to @command{configure}. However, some packages may run configure again during the build, and the customized values of these variables may be lost. In order to avoid this problem, you should set them in the @command{configure} command line, using @samp{VAR=value}. For example: @example ./configure CC=/usr/local2/bin/gcc @end example @noindent causes the specified @command{gcc} to be used as the C compiler (unless it is overridden in the site shell script). @noindent Unfortunately, this technique does not work for @env{CONFIG_SHELL} due to an Autoconf limitation. Until the limitation is lifted, you can use this workaround: @example CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash @end example @node configure Invocation @section @command{configure} Invocation @command{configure} recognizes the following options to control how it operates. @table @option @item --help @itemx -h Print a summary of all of the options to @command{configure}, and exit. 
@item --help=short @itemx --help=recursive Print a summary of the options unique to this package's @command{configure}, and exit. The @code{short} variant lists options used only in the top level, while the @code{recursive} variant lists options also present in any nested packages. @item --version @itemx -V Print the version of Autoconf used to generate the @command{configure} script, and exit. @item --cache-file=@var{file} @cindex Cache, enabling Enable the cache: use and save the results of the tests in @var{file}, traditionally @file{config.cache}. @var{file} defaults to @file{/dev/null} to disable caching. @item --config-cache @itemx -C Alias for @option{--cache-file=config.cache}. @item --quiet @itemx --silent @itemx -q Do not print messages saying which checks are being made. To suppress all normal output, redirect it to @file{/dev/null} (any error messages will still be shown). @item --srcdir=@var{dir} Look for the package's source code in directory @var{dir}. Usually @command{configure} can determine that directory automatically. @item --prefix=@var{dir} Use @var{dir} as the installation prefix. @xref{Installation Names}, for more details, including other options available for fine-tuning the installation locations. @item --no-create @itemx -n Run the configure checks, but stop before creating any output files. @end table @noindent @command{configure} also accepts some other, not widely useful, options. Run @samp{configure --help} for more details.
@c Local Variables: @c fill-column: 72 @c ispell-local-dictionary: "american" @c indent-tabs-mode: nil @c whitespace-check-buffer-indent: nil @c End: ��������autoconf-2.71/doc/gnu-oids.texi���������������������������������������������������������������������0000644�0000000�0000000�00000004101�13765663120�013342� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@c This table of OID's is included in the GNU Coding Standards. @c @c Copyright 2008, 2009, 2010, 2013 Free Software Foundation, Inc. @c @c Copying and distribution of this file, with or without modification, @c are permitted in any medium without royalty provided the copyright @c notice and this notice are preserved. @c @c When adding new OIDs, please add them also to @c https://www.alvestrand.no/objectid/ @c (Our page is https://www.alvestrand.no/objectid/1.3.6.1.4.1.11591.html.) 
1.3.6.1.4.1.11591 GNU 1.3.6.1.4.1.11591.1 GNU Radius 1.3.6.1.4.1.11591.2 GnuPG 1.3.6.1.4.1.11591.2.1 notation 1.3.6.1.4.1.11591.2.1.1 pkaAddress 1.3.6.1.4.1.11591.3 GNU Radar 1.3.6.1.4.1.11591.4 GNU GSS @c Added 2008-10-24 on request from Sergey Poznyakoff <gray@gnu.org.ua> 1.3.6.1.4.1.11591.5 GNU Mailutils @c Added 2009-03-03 on request from Simon Josefsson <simon@josefsson.org> 1.3.6.1.4.1.11591.6 GNU Shishi @c Added 2010-05-17 on request from Eric Blossom <eb@comsec.com> 1.3.6.1.4.1.11591.7 GNU Radio @c Added 2010-07-02 on request from Sergey Poznyakoff <gray@gnu.org.ua> 1.3.6.1.4.1.11591.8 GNU Dico @c Added 2013-12-17 on request from Sergey Poznyakoff <gray@gnu.org.ua> 1.3.6.1.4.1.11591.9 GNU Rush 1.3.6.1.4.1.11591.12 digestAlgorithm 1.3.6.1.4.1.11591.12.2 TIGER/192 1.3.6.1.4.1.11591.13 encryptionAlgorithm 1.3.6.1.4.1.11591.13.2 Serpent 1.3.6.1.4.1.11591.13.2.1 Serpent-128-ECB 1.3.6.1.4.1.11591.13.2.2 Serpent-128-CBC 1.3.6.1.4.1.11591.13.2.3 Serpent-128-OFB 1.3.6.1.4.1.11591.13.2.4 Serpent-128-CFB 1.3.6.1.4.1.11591.13.2.21 Serpent-192-ECB 1.3.6.1.4.1.11591.13.2.22 Serpent-192-CBC 1.3.6.1.4.1.11591.13.2.23 Serpent-192-OFB 1.3.6.1.4.1.11591.13.2.24 Serpent-192-CFB 1.3.6.1.4.1.11591.13.2.41 Serpent-256-ECB 1.3.6.1.4.1.11591.13.2.42 Serpent-256-CBC 1.3.6.1.4.1.11591.13.2.43 Serpent-256-OFB 1.3.6.1.4.1.11591.13.2.44 Serpent-256-CFB 1.3.6.1.4.1.11591.14 CRC algorithms 1.3.6.1.4.1.11591.14.1 CRC 32 @c Added 2013-12-05 on request from Werner Koch <wk@gnupg.org> 1.3.6.1.4.1.11591.15 ellipticCurve 1.3.6.1.4.1.11591.15.1 Ed25519 
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/make-stds.texi��������������������������������������������������������������������0000644�0000000�0000000�00000134647�13765663120�013530� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������@comment This file is included by both standards.texi and make.texinfo. @comment It was broken out of standards.texi on 1/6/93 by roland. @node Makefile Conventions @chapter Makefile Conventions @cindex makefile, conventions for @cindex conventions for makefiles @cindex standards for makefiles @c Copyright 1992, 1993, 1994, 1995, 1996, 1997, 1998, 2000, 2001, @c 2004, 2005, 2006, 2007, 2008, 2010, 2013, 2014, 2015 @c Free Software Foundation, Inc. @c @c Permission is granted to copy, distribute and/or modify this document @c under the terms of the GNU Free Documentation License, Version 1.3 @c or any later version published by the Free Software Foundation; @c with no Invariant Sections, with no @c Front-Cover Texts, and with no Back-Cover Texts. @c A copy of the license is included in the section entitled ``GNU @c Free Documentation License''. 
This @ifinfo node @end ifinfo @iftex @ifset CODESTD section @end ifset @ifclear CODESTD chapter @end ifclear @end iftex describes conventions for writing the Makefiles for GNU programs. Using Automake will help you write a Makefile that follows these conventions. For more information on portable Makefiles, see @sc{posix} and @ref{Portable Make, Portable Make Programming,, autoconf, Autoconf}. @menu * Makefile Basics:: General conventions for Makefiles. * Utilities in Makefiles:: Utilities to be used in Makefiles. * Command Variables:: Variables for specifying commands. * DESTDIR:: Supporting staged installs. * Directory Variables:: Variables for installation directories. * Standard Targets:: Standard targets for users. * Install Command Categories:: Three categories of commands in the @samp{install} rule: normal, pre-install and post-install. @end menu @node Makefile Basics @section General Conventions for Makefiles Every Makefile should contain this line: @example SHELL = /bin/sh @end example @noindent to avoid trouble on systems where the @code{SHELL} variable might be inherited from the environment. (This is never a problem with GNU @code{make}.) Different @code{make} programs have incompatible suffix lists and implicit rules, and this sometimes creates confusion or misbehavior. So it is a good idea to set the suffix list explicitly using only the suffixes you need in the particular Makefile, like this: @example .SUFFIXES: .SUFFIXES: .c .o @end example @noindent The first line clears out the suffix list, the second introduces all suffixes which may be subject to implicit rules in this Makefile. Don't assume that @file{.} is in the path for command execution. When you need to run programs that are a part of your package during the make, please make sure that it uses @file{./} if the program is built as part of the make or @file{$(srcdir)/} if the file is an unchanging part of the source code. Without one of these prefixes, the current search path is used. 
The distinction between @file{./} (the @dfn{build directory}) and @file{$(srcdir)/} (the @dfn{source directory}) is important because users can build in a separate directory using the @samp{--srcdir} option to @file{configure}. A rule of the form: @smallexample foo.1 : foo.man sedscript sed -f sedscript foo.man > foo.1 @end smallexample @noindent will fail when the build directory is not the source directory, because @file{foo.man} and @file{sedscript} are in the source directory. When using GNU @code{make}, relying on @samp{VPATH} to find the source file will work in the case where there is a single dependency file, since the @code{make} automatic variable @samp{$<} will represent the source file wherever it is. (Many versions of @code{make} set @samp{$<} only in implicit rules.) A Makefile target like @smallexample foo.o : bar.c $(CC) -I. -I$(srcdir) $(CFLAGS) -c bar.c -o foo.o @end smallexample @noindent should instead be written as @smallexample foo.o : bar.c $(CC) -I. -I$(srcdir) $(CFLAGS) -c $< -o $@@ @end smallexample @noindent in order to allow @samp{VPATH} to work correctly. When the target has multiple dependencies, using an explicit @samp{$(srcdir)} is the easiest way to make the rule work well. For example, the target above for @file{foo.1} is best written as: @smallexample foo.1 : foo.man sedscript sed -f $(srcdir)/sedscript $(srcdir)/foo.man > $@@ @end smallexample GNU distributions usually contain some files which are not source files---for example, Info files, and the output from Autoconf, Automake, Bison or Flex. Since these files normally appear in the source directory, they should always appear in the source directory, not in the build directory. So Makefile rules to update them should put the updated files in the source directory. 
However, if a file does not appear in the distribution, then the Makefile should not put it in the source directory, because building a program in ordinary circumstances should not modify the source directory in any way. Try to make the build and installation targets, at least (and all their subtargets) work correctly with a parallel @code{make}. @node Utilities in Makefiles @section Utilities in Makefiles Write the Makefile commands (and any shell scripts, such as @code{configure}) to run under @code{sh} (both the traditional Bourne shell and the @sc{posix} shell), not @code{csh}. Don't use any special features of @code{ksh} or @code{bash}, or @sc{posix} features not widely supported in traditional Bourne @code{sh}. The @code{configure} script and the Makefile rules for building and installation should not use any utilities directly except these: @c dd find @c gunzip gzip md5sum @c mkfifo mknod tee uname @example awk cat cmp cp diff echo egrep expr false grep install-info ln ls mkdir mv printf pwd rm rmdir sed sleep sort tar test touch tr true @end example Compression programs such as @code{gzip} can be used in the @code{dist} rule. Generally, stick to the widely-supported (usually @sc{posix}-specified) options and features of these programs. For example, don't use @samp{mkdir -p}, convenient as it may be, because a few systems don't support it at all and with others, it is not safe for parallel execution. For a list of known incompatibilities, see @ref{Portable Shell, Portable Shell Programming,, autoconf, Autoconf}. It is a good idea to avoid creating symbolic links in makefiles, since a few file systems don't support them. The Makefile rules for building and installation can also use compilers and related programs, but should do so via @code{make} variables so that the user can substitute alternatives. 
Here are some of the programs we mean: @example ar bison cc flex install ld ldconfig lex make makeinfo ranlib texi2dvi yacc @end example Use the following @code{make} variables to run those programs: @example $(AR) $(BISON) $(CC) $(FLEX) $(INSTALL) $(LD) $(LDCONFIG) $(LEX) $(MAKE) $(MAKEINFO) $(RANLIB) $(TEXI2DVI) $(YACC) @end example When you use @code{ranlib} or @code{ldconfig}, you should make sure nothing bad happens if the system does not have the program in question. Arrange to ignore an error from that command, and print a message before the command to tell the user that failure of this command does not mean a problem. (The Autoconf @samp{AC_PROG_RANLIB} macro can help with this.) If you use symbolic links, you should implement a fallback for systems that don't have symbolic links. Additional utilities that can be used via Make variables are: @example chgrp chmod chown mknod @end example It is ok to use other utilities in Makefile portions (or scripts) intended only for particular systems where you know those utilities exist. @node Command Variables @section Variables for Specifying Commands Makefiles should provide variables for overriding certain commands, options, and so on. In particular, you should run most utility programs via variables. Thus, if you use Bison, have a variable named @code{BISON} whose default value is set with @samp{BISON = bison}, and refer to it with @code{$(BISON)} whenever you need to use Bison. File management utilities such as @code{ln}, @code{rm}, @code{mv}, and so on, need not be referred to through variables in this way, since users don't need to replace them with other programs. Each program-name variable should come with an options variable that is used to supply options to the program. Append @samp{FLAGS} to the program-name variable name to get the options variable name---for example, @code{BISONFLAGS}. 
(The names @code{CFLAGS} for the C compiler, @code{YFLAGS} for yacc, and @code{LFLAGS} for lex, are exceptions to this rule, but we keep them because they are standard.) Use @code{CPPFLAGS} in any compilation command that runs the preprocessor, and use @code{LDFLAGS} in any compilation command that does linking as well as in any direct use of @code{ld}. If there are C compiler options that @emph{must} be used for proper compilation of certain files, do not include them in @code{CFLAGS}. Users expect to be able to specify @code{CFLAGS} freely themselves. Instead, arrange to pass the necessary options to the C compiler independently of @code{CFLAGS}, by writing them explicitly in the compilation commands or by defining an implicit rule, like this: @smallexample CFLAGS = -g ALL_CFLAGS = -I. $(CFLAGS) .c.o: $(CC) -c $(CPPFLAGS) $(ALL_CFLAGS) $< @end smallexample Do include the @samp{-g} option in @code{CFLAGS}, because that is not @emph{required} for proper compilation. You can consider it a default that is only recommended. If the package is set up so that it is compiled with GCC by default, then you might as well include @samp{-O} in the default value of @code{CFLAGS} as well. Put @code{CFLAGS} last in the compilation command, after other variables containing compiler options, so the user can use @code{CFLAGS} to override the others. @code{CFLAGS} should be used in every invocation of the C compiler, both those which do compilation and those which do linking. Every Makefile should define the variable @code{INSTALL}, which is the basic command for installing a file into the system. Every Makefile should also define the variables @code{INSTALL_PROGRAM} and @code{INSTALL_DATA}. (The default for @code{INSTALL_PROGRAM} should be @code{$(INSTALL)}; the default for @code{INSTALL_DATA} should be @code{$@{INSTALL@} -m 644}.) Then it should use those variables as the commands for actual installation, for executables and non-executables respectively. 
Minimal use of these variables is as follows: @example $(INSTALL_PROGRAM) foo $(bindir)/foo $(INSTALL_DATA) libfoo.a $(libdir)/libfoo.a @end example However, it is preferable to support a @code{DESTDIR} prefix on the target files, as explained in the next section. It is acceptable, but not required, to install multiple files in one command, with the final argument being a directory, as in: @example $(INSTALL_PROGRAM) foo bar baz $(bindir) @end example @node DESTDIR @section @code{DESTDIR}: Support for Staged Installs @vindex DESTDIR @cindex staged installs @cindex installations, staged @code{DESTDIR} is a variable prepended to each installed target file, like this: @example $(INSTALL_PROGRAM) foo $(DESTDIR)$(bindir)/foo $(INSTALL_DATA) libfoo.a $(DESTDIR)$(libdir)/libfoo.a @end example The @code{DESTDIR} variable is specified by the user on the @code{make} command line as an absolute file name. For example: @example make DESTDIR=/tmp/stage install @end example @noindent @code{DESTDIR} should be supported only in the @code{install*} and @code{uninstall*} targets, as those are the only targets where it is useful. If your installation step would normally install @file{/usr/local/bin/foo} and @file{/usr/@/local/@/lib/@/libfoo.a}, then an installation invoked as in the example above would install @file{/tmp/stage/usr/local/bin/foo} and @file{/tmp/stage/usr/local/lib/libfoo.a} instead. Prepending the variable @code{DESTDIR} to each target in this way provides for @dfn{staged installs}, where the installed files are not placed directly into their expected location but are instead copied into a temporary location (@code{DESTDIR}). However, installed files maintain their relative directory structure and any embedded file names will not be modified. You should not set the value of @code{DESTDIR} in your @file{Makefile} at all; then the files are installed into their expected locations by default. 
Also, specifying @code{DESTDIR} should not change the operation of the software in any way, so its value should not be included in any file contents. @code{DESTDIR} support is commonly used in package creation. It is also helpful to users who want to understand what a given package will install where, and to allow users who don't normally have permissions to install into protected areas to build and install before gaining those permissions. Finally, it can be useful with tools such as @code{stow}, where code is installed in one place but made to appear to be installed somewhere else using symbolic links or special mount operations. So, we strongly recommend GNU packages support @code{DESTDIR}, though it is not an absolute requirement. @node Directory Variables @section Variables for Installation Directories Installation directories should always be named by variables, so it is easy to install in a nonstandard place. The standard names for these variables and the values they should have in GNU packages are described below. They are based on a standard file system layout; variants of it are used in GNU/Linux and other modern operating systems. Installers are expected to override these values when calling @command{make} (e.g., @kbd{make prefix=/usr install}) or @command{configure} (e.g., @kbd{configure --prefix=/usr}). GNU packages should not try to guess which value should be appropriate for these variables on the system they are being installed onto: use the default settings specified here so that all GNU packages behave identically, allowing the installer to achieve any desired layout. @cindex directories, creating installation @cindex installation directories, creating All installation directories, and their parent directories, should be created (if necessary) before they are installed into. These first two variables set the root for the installation. 
All the other installation directories should be subdirectories of one of these two, and nothing should be directly installed into these two directories. @table @code @item prefix @vindex prefix A prefix used in constructing the default values of the variables listed below. The default value of @code{prefix} should be @file{/usr/local}. When building the complete GNU system, the prefix will be empty and @file{/usr} will be a symbolic link to @file{/}. (If you are using Autoconf, write it as @samp{@@prefix@@}.) Running @samp{make install} with a different value of @code{prefix} from the one used to build the program should @emph{not} recompile the program. @item exec_prefix @vindex exec_prefix A prefix used in constructing the default values of some of the variables listed below. The default value of @code{exec_prefix} should be @code{$(prefix)}. (If you are using Autoconf, write it as @samp{@@exec_prefix@@}.) Generally, @code{$(exec_prefix)} is used for directories that contain machine-specific files (such as executables and subroutine libraries), while @code{$(prefix)} is used directly for other directories. Running @samp{make install} with a different value of @code{exec_prefix} from the one used to build the program should @emph{not} recompile the program. @end table Executable programs are installed in one of the following directories. @table @code @item bindir @vindex bindir The directory for installing executable programs that users can run. This should normally be @file{/usr/local/bin}, but write it as @file{$(exec_prefix)/bin}. (If you are using Autoconf, write it as @samp{@@bindir@@}.) @item sbindir @vindex sbindir The directory for installing executable programs that can be run from the shell, but are only generally useful to system administrators. This should normally be @file{/usr/local/sbin}, but write it as @file{$(exec_prefix)/sbin}. (If you are using Autoconf, write it as @samp{@@sbindir@@}.) 
@item libexecdir @vindex libexecdir @comment This paragraph adjusted to avoid overfull hbox --roland 5jul94 The directory for installing executable programs to be run by other programs rather than by users. This directory should normally be @file{/usr/local/libexec}, but write it as @file{$(exec_prefix)/libexec}. (If you are using Autoconf, write it as @samp{@@libexecdir@@}.) The definition of @samp{libexecdir} is the same for all packages, so you should install your data in a subdirectory thereof. Most packages install their data under @file{$(libexecdir)/@var{package-name}/}, possibly within additional subdirectories thereof, such as @file{$(libexecdir)/@var{package-name}/@var{machine}/@var{version}}. @end table Data files used by the program during its execution are divided into categories in two ways. @itemize @bullet @item Some files are normally modified by programs; others are never normally modified (though users may edit some of these). @item Some files are architecture-independent and can be shared by all machines at a site; some are architecture-dependent and can be shared only by machines of the same kind and operating system; others may never be shared between two machines. @end itemize This makes for six different possibilities. However, we want to discourage the use of architecture-dependent files, aside from object files and libraries. It is much cleaner to make other data files architecture-independent, and it is generally not hard. Here are the variables Makefiles should use to specify directories to put these various kinds of files in: @table @samp @item datarootdir The root of the directory tree for read-only architecture-independent data files. This should normally be @file{/usr/local/share}, but write it as @file{$(prefix)/share}. (If you are using Autoconf, write it as @samp{@@datarootdir@@}.) @samp{datadir}'s default value is based on this variable; so are @samp{infodir}, @samp{mandir}, and others. 
@item datadir The directory for installing idiosyncratic read-only architecture-independent data files for this program. This is usually the same place as @samp{datarootdir}, but we use the two separate variables so that you can move these program-specific files without altering the location for Info files, man pages, etc. @c raggedright (not until next Texinfo release) This should normally be @file{/usr/local/share}, but write it as @file{$(datarootdir)}. (If you are using Autoconf, write it as @samp{@@datadir@@}.) @c end raggedright The definition of @samp{datadir} is the same for all packages, so you should install your data in a subdirectory thereof. Most packages install their data under @file{$(datadir)/@var{package-name}/}. @item sysconfdir The directory for installing read-only data files that pertain to a single machine---that is to say, files for configuring a host. Mailer and network configuration files, @file{/etc/passwd}, and so forth belong here. All the files in this directory should be ordinary ASCII text files. This directory should normally be @file{/usr/local/etc}, but write it as @file{$(prefix)/etc}. (If you are using Autoconf, write it as @samp{@@sysconfdir@@}.) Do not install executables here in this directory (they probably belong in @file{$(libexecdir)} or @file{$(sbindir)}). Also do not install files that are modified in the normal course of their use (programs whose purpose is to change the configuration of the system excluded). Those probably belong in @file{$(localstatedir)}. @item sharedstatedir The directory for installing architecture-independent data files which the programs modify while they run. This should normally be @file{/usr/local/com}, but write it as @file{$(prefix)/com}. (If you are using Autoconf, write it as @samp{@@sharedstatedir@@}.) @item localstatedir The directory for installing data files which the programs modify while they run, and that pertain to one specific machine. 
Users should never need to modify files in this directory to configure the package's operation; put such configuration information in separate files that go in @file{$(datadir)} or @file{$(sysconfdir)}. @file{$(localstatedir)} should normally be @file{/usr/local/var}, but write it as @file{$(prefix)/var}. (If you are using Autoconf, write it as @samp{@@localstatedir@@}.) @item runstatedir The directory for installing data files which the programs modify while they run, that pertain to one specific machine, and which need not persist longer than the execution of the program---which is generally long-lived, for example, until the next reboot. PID files for system daemons are a typical use. In addition, this directory should not be cleaned except perhaps at reboot, while the general @file{/tmp} (@code{TMPDIR}) may be cleaned arbitrarily. This should normally be @file{/var/run}, but write it as @file{$(localstatedir)/run}. Having it as a separate variable allows the use of @file{/run} if desired, for example. (If you are using Autoconf 2.70 or later, write it as @samp{@@runstatedir@@}.) @end table These variables specify the directory for installing certain specific types of files, if your program has them. Every GNU package should have Info files, so every program needs @samp{infodir}, but not all need @samp{libdir} or @samp{lispdir}. @table @samp @item includedir The directory for installing header files to be included by user programs with the C @samp{#include} preprocessor directive. This should normally be @file{/usr/local/include}, but write it as @file{$(prefix)/include}. (If you are using Autoconf, write it as @samp{@@includedir@@}.) Most compilers other than GCC do not look for header files in directory @file{/usr/local/include}. So installing the header files this way is only useful with GCC@. Sometimes this is not a problem because some libraries are only really intended to work with GCC@. But some libraries are intended to work with other compilers. 
They should install their header files in two places, one specified by @code{includedir} and one specified by @code{oldincludedir}. @item oldincludedir The directory for installing @samp{#include} header files for use with compilers other than GCC@. This should normally be @file{/usr/include}. (If you are using Autoconf, you can write it as @samp{@@oldincludedir@@}.) The Makefile commands should check whether the value of @code{oldincludedir} is empty. If it is, they should not try to use it; they should cancel the second installation of the header files. A package should not replace an existing header in this directory unless the header came from the same package. Thus, if your Foo package provides a header file @file{foo.h}, then it should install the header file in the @code{oldincludedir} directory if either (1) there is no @file{foo.h} there or (2) the @file{foo.h} that exists came from the Foo package. To tell whether @file{foo.h} came from the Foo package, put a magic string in the file---part of a comment---and @code{grep} for that string. @item docdir The directory for installing documentation files (other than Info) for this package. By default, it should be @file{/usr/local/share/doc/@var{yourpkg}}, but it should be written as @file{$(datarootdir)/doc/@var{yourpkg}}. (If you are using Autoconf, write it as @samp{@@docdir@@}.) The @var{yourpkg} subdirectory, which may include a version number, prevents collisions among files with common names, such as @file{README}. @item infodir The directory for installing the Info files for this package. By default, it should be @file{/usr/local/share/info}, but it should be written as @file{$(datarootdir)/info}. (If you are using Autoconf, write it as @samp{@@infodir@@}.) @code{infodir} is separate from @code{docdir} for compatibility with existing practice. @item htmldir @itemx dvidir @itemx pdfdir @itemx psdir Directories for installing documentation files in the particular format. 
They should all be set to @code{$(docdir)} by default. (If you are using Autoconf, write them as @samp{@@htmldir@@}, @samp{@@dvidir@@}, etc.) Packages which supply several translations of their documentation should install them in @samp{$(htmldir)/}@var{ll}, @samp{$(pdfdir)/}@var{ll}, etc. where @var{ll} is a locale abbreviation such as @samp{en} or @samp{pt_BR}. @item libdir The directory for object files and libraries of object code. Do not install executables here, they probably ought to go in @file{$(libexecdir)} instead. The value of @code{libdir} should normally be @file{/usr/local/lib}, but write it as @file{$(exec_prefix)/lib}. (If you are using Autoconf, write it as @samp{@@libdir@@}.) @item lispdir The directory for installing any Emacs Lisp files in this package. By default, it should be @file{/usr/local/share/emacs/site-lisp}, but it should be written as @file{$(datarootdir)/emacs/site-lisp}. If you are using Autoconf, write the default as @samp{@@lispdir@@}. In order to make @samp{@@lispdir@@} work, you need the following lines in your @file{configure.ac} file: @example lispdir='$@{datarootdir@}/emacs/site-lisp' AC_SUBST(lispdir) @end example @item localedir The directory for installing locale-specific message catalogs for this package. By default, it should be @file{/usr/local/share/locale}, but it should be written as @file{$(datarootdir)/locale}. (If you are using Autoconf, write it as @samp{@@localedir@@}.) This directory usually has a subdirectory per locale. @end table Unix-style man pages are installed in one of the following: @table @samp @item mandir The top-level directory for installing the man pages (if any) for this package. It will normally be @file{/usr/local/share/man}, but you should write it as @file{$(datarootdir)/man}. (If you are using Autoconf, write it as @samp{@@mandir@@}.) @item man1dir The directory for installing section 1 man pages. Write it as @file{$(mandir)/man1}. 
@item man2dir The directory for installing section 2 man pages. Write it as @file{$(mandir)/man2}. @item @dots{} @strong{Don't make the primary documentation for any GNU software be a man page. Write a manual in Texinfo instead. Man pages are just for the sake of people running GNU software on Unix, which is a secondary application only.} @item manext The file name extension for the installed man page. This should contain a period followed by the appropriate digit; it should normally be @samp{.1}. @item man1ext The file name extension for installed section 1 man pages. @item man2ext The file name extension for installed section 2 man pages. @item @dots{} Use these names instead of @samp{manext} if the package needs to install man pages in more than one section of the manual. @end table And finally, you should set the following variable: @table @samp @item srcdir The directory for the sources being compiled. The value of this variable is normally inserted by the @code{configure} shell script. (If you are using Autoconf, use @samp{srcdir = @@srcdir@@}.) @end table For example: @smallexample @c I have changed some of the comments here slightly to fix an overfull @c hbox, so the make manual can format correctly. --roland # Common prefix for installation directories. # NOTE: This directory must exist when you start the install. prefix = /usr/local datarootdir = $(prefix)/share datadir = $(datarootdir) exec_prefix = $(prefix) # Where to put the executable for the command 'gcc'. bindir = $(exec_prefix)/bin # Where to put the directories used by the compiler. libexecdir = $(exec_prefix)/libexec # Where to put the Info files. infodir = $(datarootdir)/info @end smallexample If your program installs a large number of files into one of the standard user-specified directories, it might be useful to group them into a subdirectory particular to that program. If you do this, you should write the @code{install} rule to create these subdirectories. 
Do not expect the user to include the subdirectory name in the value of any of the variables listed above. The idea of having a uniform set of variable names for installation directories is to enable the user to specify the exact same values for several different GNU packages. In order for this to be useful, all the packages must be designed so that they will work sensibly when the user does so. At times, not all of these variables may be implemented in the current release of Autoconf and/or Automake; but as of Autoconf@tie{}2.60, we believe all of them are. When any are missing, the descriptions here serve as specifications for what Autoconf will implement. As a programmer, you can either use a development version of Autoconf or avoid using these variables until a stable release is made which supports them. @node Standard Targets @section Standard Targets for Users All GNU programs should have the following targets in their Makefiles: @table @samp @item all Compile the entire program. This should be the default target. This target need not rebuild any documentation files; Info files should normally be included in the distribution, and DVI (and other documentation format) files should be made only when explicitly asked for. By default, the Make rules should compile and link with @samp{-g}, so that executable programs have debugging symbols. Otherwise, you are essentially helpless in the face of a crash, and it is often far from easy to reproduce with a fresh build. @item install Compile the program and copy the executables, libraries, and so on to the file names where they should reside for actual use. If there is a simple test to verify that a program is properly installed, this target should run that test. Do not strip executables when installing them. This helps eventual debugging that may be needed later, and nowadays disk space is cheap and dynamic loaders typically ensure debug sections are not loaded during normal execution. 
Users that need stripped binaries may invoke the @code{install-strip} target to do that. If possible, write the @code{install} target rule so that it does not modify anything in the directory where the program was built, provided @samp{make all} has just been done. This is convenient for building the program under one user name and installing it under another. The commands should create all the directories in which files are to be installed, if they don't already exist. This includes the directories specified as the values of the variables @code{prefix} and @code{exec_prefix}, as well as all subdirectories that are needed. One way to do this is by means of an @code{installdirs} target as described below. Use @samp{-} before any command for installing a man page, so that @code{make} will ignore any errors. This is in case there are systems that don't have the Unix man page documentation system installed. The way to install Info files is to copy them into @file{$(infodir)} with @code{$(INSTALL_DATA)} (@pxref{Command Variables}), and then run the @code{install-info} program if it is present. @code{install-info} is a program that edits the Info @file{dir} file to add or update the menu entry for the given Info file; it is part of the Texinfo package. Here is a sample rule to install an Info file that also tries to handle some additional situations, such as @code{install-info} not being present. @comment This example has been carefully formatted for the Make manual. @comment Please do not reformat it without talking to bug-make@gnu.org. @smallexample do-install-info: foo.info installdirs $(NORMAL_INSTALL) # Prefer an info file in . to one in srcdir. if test -f foo.info; then d=.; \ else d="$(srcdir)"; fi; \ $(INSTALL_DATA) $$d/foo.info \ "$(DESTDIR)$(infodir)/foo.info" # Run install-info only if it exists. # Use 'if' instead of just prepending '-' to the # line so we notice real errors from install-info. 
# Use '$(SHELL) -c' because some shells do not # fail gracefully when there is an unknown command. $(POST_INSTALL) if $(SHELL) -c 'install-info --version' \ >/dev/null 2>&1; then \ install-info --dir-file="$(DESTDIR)$(infodir)/dir" \ "$(DESTDIR)$(infodir)/foo.info"; \ else true; fi @end smallexample When writing the @code{install} target, you must classify all the commands into three categories: normal ones, @dfn{pre-installation} commands and @dfn{post-installation} commands. @xref{Install Command Categories}. @item install-html @itemx install-dvi @itemx install-pdf @itemx install-ps These targets install documentation in formats other than Info; they're intended to be called explicitly by the person installing the package, if that format is desired. GNU prefers Info files, so these need not be installed by the @code{install} target. When you have many documentation files to install, we recommend that you avoid collisions and clutter by arranging for these targets to install in subdirectories of the appropriate installation directory, such as @code{htmldir}. As one example, if your package has multiple manuals, and you wish to install HTML documentation with many files (such as the ``split'' mode output by @code{makeinfo --html}), you'll certainly want to use subdirectories, or two nodes with the same name in different manuals will overwrite each other. Please make these @code{install-@var{format}} targets invoke the commands for the @var{format} target, for example, by making @var{format} a dependency. @item uninstall Delete all the installed files---the copies that the @samp{install} and @samp{install-*} targets create. This rule should not modify the directories where compilation is done, only the directories where files are installed. The uninstallation commands are divided into three categories, just like the installation commands. @xref{Install Command Categories}. @item install-strip Like @code{install}, but strip the executable files while installing them. 
In simple cases, this target can use the @code{install} target in a simple way: @smallexample install-strip: $(MAKE) INSTALL_PROGRAM='$(INSTALL_PROGRAM) -s' \ install @end smallexample But if the package installs scripts as well as real executables, the @code{install-strip} target can't just refer to the @code{install} target; it has to strip the executables but not the scripts. @code{install-strip} should not strip the executables in the build directory which are being copied for installation. It should only strip the copies that are installed. Normally we do not recommend stripping an executable unless you are sure the program has no bugs. However, it can be reasonable to install a stripped executable for actual execution while saving the unstripped executable elsewhere in case there is a bug. @item clean Delete all files in the current directory that are normally created by building the program. Also delete files in other directories if they are created by this makefile. However, don't delete the files that record the configuration. Also preserve files that could be made by building, but normally aren't because the distribution comes with them. There is no need to delete parent directories that were created with @samp{mkdir -p}, since they could have existed anyway. Delete @file{.dvi} files here if they are not part of the distribution. @item distclean Delete all files in the current directory (or created by this makefile) that are created by configuring or building the program. If you have unpacked the source and built the program without creating any other files, @samp{make distclean} should leave only the files that were in the distribution. However, there is no need to delete parent directories that were created with @samp{mkdir -p}, since they could have existed anyway. @item mostlyclean Like @samp{clean}, but may refrain from deleting a few files that people normally don't want to recompile. 
For example, the @samp{mostlyclean} target for GCC does not delete @file{libgcc.a}, because recompiling it is rarely necessary and takes a lot of time. @item maintainer-clean Delete almost everything that can be reconstructed with this Makefile. This typically includes everything deleted by @code{distclean}, plus more: C source files produced by Bison, tags tables, Info files, and so on. The reason we say ``almost everything'' is that running the command @samp{make maintainer-clean} should not delete @file{configure} even if @file{configure} can be remade using a rule in the Makefile. More generally, @samp{make maintainer-clean} should not delete anything that needs to exist in order to run @file{configure} and then begin to build the program. Also, there is no need to delete parent directories that were created with @samp{mkdir -p}, since they could have existed anyway. These are the only exceptions; @code{maintainer-clean} should delete everything else that can be rebuilt. The @samp{maintainer-clean} target is intended to be used by a maintainer of the package, not by ordinary users. You may need special tools to reconstruct some of the files that @samp{make maintainer-clean} deletes. Since these files are normally included in the distribution, we don't take care to make them easy to reconstruct. If you find you need to unpack the full distribution again, don't blame us. To help make users aware of this, the commands for the special @code{maintainer-clean} target should start with these two: @smallexample @@echo 'This command is intended for maintainers to use; it' @@echo 'deletes files that may need special tools to rebuild.' @end smallexample @item TAGS Update a tags table for this program. @c ADR: how? @item info Generate any Info files needed. 
The best way to write the rules is as follows: @smallexample info: foo.info foo.info: foo.texi chap1.texi chap2.texi $(MAKEINFO) $(srcdir)/foo.texi @end smallexample @noindent You must define the variable @code{MAKEINFO} in the Makefile. It should run the @code{makeinfo} program, which is part of the Texinfo distribution. Normally a GNU distribution comes with Info files, and that means the Info files are present in the source directory. Therefore, the Make rule for an info file should update it in the source directory. When users build the package, ordinarily Make will not update the Info files because they will already be up to date. @item dvi @itemx html @itemx pdf @itemx ps Generate documentation files in the given format. These targets should always exist, but any or all can be a no-op if the given output format cannot be generated. These targets should not be dependencies of the @code{all} target; the user must manually invoke them. Here's an example rule for generating DVI files from Texinfo: @smallexample dvi: foo.dvi foo.dvi: foo.texi chap1.texi chap2.texi $(TEXI2DVI) $(srcdir)/foo.texi @end smallexample @noindent You must define the variable @code{TEXI2DVI} in the Makefile. It should run the program @code{texi2dvi}, which is part of the Texinfo distribution. (@code{texi2dvi} uses @TeX{} to do the real work of formatting. @TeX{} is not distributed with Texinfo.) Alternatively, write only the dependencies, and allow GNU @code{make} to provide the command. Here's another example, this one for generating HTML from Texinfo: @smallexample html: foo.html foo.html: foo.texi chap1.texi chap2.texi $(TEXI2HTML) $(srcdir)/foo.texi @end smallexample @noindent Again, you would define the variable @code{TEXI2HTML} in the Makefile; for example, it might run @code{makeinfo --no-split --html} (@command{makeinfo} is part of the Texinfo distribution). @item dist Create a distribution tar file for this program. 
The tar file should be set up so that the file names in the tar file start with a subdirectory name which is the name of the package it is a distribution for. This name can include the version number. For example, the distribution tar file of GCC version 1.40 unpacks into a subdirectory named @file{gcc-1.40}. The easiest way to do this is to create a subdirectory appropriately named, use @code{ln} or @code{cp} to install the proper files in it, and then @code{tar} that subdirectory. Compress the tar file with @code{gzip}. For example, the actual distribution file for GCC version 1.40 is called @file{gcc-1.40.tar.gz}. It is ok to support other free compression formats as well. The @code{dist} target should explicitly depend on all non-source files that are in the distribution, to make sure they are up to date in the distribution. @ifset CODESTD @xref{Releases, , Making Releases}. @end ifset @ifclear CODESTD @xref{Releases, , Making Releases, standards, GNU Coding Standards}. @end ifclear @item check Perform self-tests (if any). The user must build the program before running the tests, but need not install the program; you should write the self-tests so that they work when the program is built but not installed. @end table The following targets are suggested as conventional names, for programs in which they are useful. @table @code @item installcheck Perform installation tests (if any). The user must build and install the program before running the tests. You should not assume that @file{$(bindir)} is in the search path. @item installdirs It's useful to add a target named @samp{installdirs} to create the directories where files are installed, and their parent directories. There is a script called @file{mkinstalldirs} which is convenient for this; you can find it in the Gnulib package. You can use a rule like this: @comment This has been carefully formatted to look decent in the Make manual. 
@comment Please be sure not to make it extend any further to the right.--roland @smallexample # Make sure all installation directories (e.g. $(bindir)) # actually exist by making them if necessary. installdirs: mkinstalldirs $(srcdir)/mkinstalldirs $(bindir) $(datadir) \ $(libdir) $(infodir) \ $(mandir) @end smallexample @noindent or, if you wish to support @env{DESTDIR} (strongly encouraged), @smallexample # Make sure all installation directories (e.g. $(bindir)) # actually exist by making them if necessary. installdirs: mkinstalldirs $(srcdir)/mkinstalldirs \ $(DESTDIR)$(bindir) $(DESTDIR)$(datadir) \ $(DESTDIR)$(libdir) $(DESTDIR)$(infodir) \ $(DESTDIR)$(mandir) @end smallexample This rule should not modify the directories where compilation is done. It should do nothing but create installation directories. @end table @node Install Command Categories @section Install Command Categories @cindex pre-installation commands @cindex post-installation commands When writing the @code{install} target, you must classify all the commands into three categories: normal ones, @dfn{pre-installation} commands and @dfn{post-installation} commands. Normal commands move files into their proper places, and set their modes. They may not alter any files except the ones that come entirely from the package they belong to. Pre-installation and post-installation commands may alter other files; in particular, they can edit global configuration files or data bases. Pre-installation commands are typically executed before the normal commands, and post-installation commands are typically run after the normal commands. The most common use for a post-installation command is to run @code{install-info}. This cannot be done with a normal command, since it alters a file (the Info directory) which does not come entirely and solely from the package being installed. It is a post-installation command because it needs to be done after the normal command which installs the package's Info files. 
Most programs don't need any pre-installation commands, but we have the feature just in case it is needed. To classify the commands in the @code{install} rule into these three categories, insert @dfn{category lines} among them. A category line specifies the category for the commands that follow. A category line consists of a tab and a reference to a special Make variable, plus an optional comment at the end. There are three variables you can use, one for each category; the variable name specifies the category. Category lines are no-ops in ordinary execution because these three Make variables are normally undefined (and you @emph{should not} define them in the makefile). Here are the three possible category lines, each with a comment that explains what it means: @smallexample $(PRE_INSTALL) # @r{Pre-install commands follow.} $(POST_INSTALL) # @r{Post-install commands follow.} $(NORMAL_INSTALL) # @r{Normal commands follow.} @end smallexample If you don't use a category line at the beginning of the @code{install} rule, all the commands are classified as normal until the first category line. If you don't use any category lines, all the commands are classified as normal. These are the category lines for @code{uninstall}: @smallexample $(PRE_UNINSTALL) # @r{Pre-uninstall commands follow.} $(POST_UNINSTALL) # @r{Post-uninstall commands follow.} $(NORMAL_UNINSTALL) # @r{Normal commands follow.} @end smallexample Typically, a pre-uninstall command would be used for deleting entries from the Info directory. If the @code{install} or @code{uninstall} target has any dependencies which act as subroutines of installation, then you should start @emph{each} dependency's commands with a category line, and start the main target's commands with a category line also. This way, you can ensure that each command is placed in the right category regardless of which of the dependencies actually run. 
Pre-installation and post-installation commands should not run any programs except for these: @example [ basename bash cat chgrp chmod chown cmp cp dd diff echo egrep expand expr false fgrep find getopt grep gunzip gzip hostname install install-info kill ldconfig ln ls md5sum mkdir mkfifo mknod mv printenv pwd rm rmdir sed sort tee test touch true uname xargs yes @end example @cindex binary packages The reason for distinguishing the commands in this way is for the sake of making binary packages. Typically a binary package contains all the executables and other files that need to be installed, and has its own method of installing them---so it does not need to run the normal installation commands. But installing the binary package does need to execute the pre-installation and post-installation commands. Programs to build binary packages work by extracting the pre-installation and post-installation commands. Here is one way of extracting the pre-installation commands (the @option{-s} option to @command{make} is needed to silence messages about entering subdirectories): @smallexample make -s -n install -o all \ PRE_INSTALL=pre-install \ POST_INSTALL=post-install \ NORMAL_INSTALL=normal-install \ | gawk -f pre-install.awk @end smallexample @noindent where the file @file{pre-install.awk} could contain this: @smallexample $0 ~ /^(normal-install|post-install)[ \t]*$/ @{on = 0@} on @{print $0@} $0 ~ /^pre-install[ \t]*$/ @{on = 1@} @end smallexample �����������������������������������������������������������������������������������������autoconf-2.71/doc/local.mk��������������������������������������������������������������������������0000644�0000000�0000000�00000002550�14004621270�012336� 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������# Make Autoconf documentation. # Copyright (C) 2000-2003, 2007-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. AM_MAKEINFOFLAGS = --no-split TEXI2HTML_FLAGS = -split_chapter TEXINFO_TEX = build-aux/texinfo.tex info_TEXINFOS = doc/autoconf.texi doc/standards.texi doc_autoconf_TEXINFOS = doc/fdl.texi doc/install.texi doc_standards_TEXINFOS = doc/fdl.texi doc/gnu-oids.texi doc/make-stds.texi EXTRA_DIST += doc/gendocs_template # Files from texi2dvi that should be removed, but which Automake does # not know. 
CLEANFILES += \ autoconf.ACs \ autoconf.cvs \ autoconf.MSs \ autoconf.prs \ autoconf.ATs \ autoconf.evs \ autoconf.fns \ autoconf.ovs \ autoconf.ca \ autoconf.CA \ autoconf.cas \ autoconf.CAs \ autoconf.tmp ��������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/autoconf.texi���������������������������������������������������������������������0000644�0000000�0000000�00003665172�14004621270�013446� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������\input texinfo @c -*-texinfo-*- @comment ======================================================== @comment %**start of header @setfilename autoconf.info @include version.texi @settitle Autoconf @documentencoding UTF-8 @set txicodequoteundirected @set txicodequotebacktick @setchapternewpage odd @finalout @c @ovar(ARG) @c ---------- @c The ARG is an optional argument. To be used for macro arguments in @c their documentation (@defmac). @macro ovar{varname} @r{[}@var{\varname\}@r{]} @end macro @c @dvar(ARG, DEFAULT) @c ------------------- @c The ARG is an optional argument, defaulting to DEFAULT. To be used @c for macro arguments in their documentation (@defmac). @macro dvar{varname, default} @r{[}@var{\varname\} = @samp{\default\}@r{]} @end macro @c @dvarv(ARG, DEFAULT-VAR) @c ------------------------ @c Same as @dvar{ARG, DEFAULT-VAR}, but with @var instead of @samp @c around DEFAULT-VAR. @macro dvarv{varname, default} @r{[}@var{\varname\} = @var{\default\}@r{]}@c @end macro @c Handling the indexes with Texinfo yields several different problems. 
@c @c Because we want to drop out the AC_ part of the macro names in the @c printed manual, but not in the other outputs, we need a layer above @c the usual @acindex{} etc. That's why we first define indexes such as @c acx meant to become the macro @acindex. First of all, using 'ac_' @c does not work with makeinfo, and using 'ac1' doesn't work with TeX. @c So use something more regular 'acx'. Then you finish with a printed @c index saying 'index is not existent'. Of course: you ought to use @c two letters :( So you use capitals. @c @c Second, when defining a macro in the TeX world, following spaces are @c eaten. But then, since we embed @acxindex commands that use the end @c of line as an end marker, the whole things wrecks itself. So make @c sure you do *force* an additional end of line, add a '@c'. @c @c Finally, you might want to get rid of TeX expansion, using --expand @c with texi2dvi. But then you wake up an old problem: we use macros @c in @defmac etc. where TeX does perform the expansion, but not makeinfo. @c Define an environment variable index, for variables users may set @c in their environment or on the configure command line. @defcodeindex ev @c Define an output variable index, for commonly AC_SUBST'ed variables. @defcodeindex ov @c Define a cache variable index, for variables matching *_cv_*. @defcodeindex CA @c Other shell variables not fitting the above categories should be @c listed in the predefined vrindex, which we merge in the concept index. @syncodeindex vr cp @c Define a CPP preprocessor macro index, for #define'd strings. @defcodeindex cv @c Define an Autoconf macro index that @defmac doesn't write to. @defcodeindex AC @c Define an Autotest macro index that @defmac doesn't write to. @defcodeindex AT @c Define an M4sugar macro index that @defmac doesn't write to. @defcodeindex MS @c Define an index for *foreign* programs: 'mv' etc. Used for the @c portability sections and so on. 
@defindex pr @c shortindexflag @c -------------- @c Shall we factor AC_ out of the Autoconf macro index etc.? @iftex @set shortindexflag @end iftex @c @acindex{MACRO} @c --------------- @c Registering an AC_\MACRO\. @ifset shortindexflag @macro acindex{macro} @ACindex \macro\ @c @end macro @end ifset @ifclear shortindexflag @macro acindex{macro} @ACindex AC_\macro\ @end macro @end ifclear @c @ahindex{MACRO} @c --------------- @c Registering an AH_\MACRO\. @macro ahindex{macro} @ACindex AH_\macro\ @c @end macro @c @asindex{MACRO} @c --------------- @c Registering an AS_\MACRO\. @ifset shortindexflag @macro asindex{macro} @MSindex \macro\ @c @end macro @end ifset @ifclear shortindexflag @macro asindex{macro} @MSindex AS_\macro\ @end macro @end ifclear @c @atindex{MACRO} @c --------------- @c Registering an AT_\MACRO\. @ifset shortindexflag @macro atindex{macro} @ATindex \macro\ @c @end macro @end ifset @ifclear shortindexflag @macro atindex{macro} @ATindex AT_\macro\ @end macro @end ifclear @c @auindex{MACRO} @c --------------- @c Registering an AU_\MACRO\. @macro auindex{macro} @ACindex AU_\macro\ @c @end macro @c @hdrindex{MACRO} @c ---------------- @c Indexing a header. @macro hdrindex{macro} @prindex @file{\macro\} @c @end macro @c @msindex{MACRO} @c --------------- @c Registering an m4_\MACRO\. @ifset shortindexflag @macro msindex{macro} @MSindex \macro\ @c @end macro @end ifset @ifclear shortindexflag @macro msindex{macro} @MSindex m4_\macro\ @end macro @end ifclear @c @caindex{VARIABLE} @c ------------------ @c Registering an ac_cv_\VARIABLE\ cache variable. @ifset shortindexflag @macro caindex{macro} @CAindex \macro\ @end macro @end ifset @ifclear shortindexflag @macro caindex{macro} @CAindex ac_cv_\macro\ @end macro @end ifclear @c Define an index for functions: `alloca' etc. Used for the @c portability sections and so on. We can't use `fn' (aka `fnindex), @c since `@defmac' goes into it => we'd get all the macros too. @c FIXME: Aaarg! 
It seems there are too many indices for TeX :( @c @c ! No room for a new @write . @c l.112 @defcodeindex fu @c @c so don't define yet another one :( Just put some tags before each @c @prindex which is actually a @funindex. @c @c @defcodeindex fu @c @c @c @c Put the programs and functions into their own index. @c @syncodeindex fu pr @comment %**end of header @comment ======================================================== @copying This manual (@value{UPDATED}) is for GNU Autoconf (version @value{VERSION}), a package for creating scripts to configure source code packages using templates and an M4 macro package. Copyright @copyright{} 1992--1996, 1998--2017, 2020--2021 Free Software Foundation, Inc. @quotation Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover texts, and no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License.'' @end quotation @end copying @dircategory Software development @direntry * Autoconf: (autoconf). Create source code configuration scripts. @end direntry @dircategory Individual utilities @direntry * autoscan: (autoconf)autoscan Invocation. Semi-automatic @file{configure.ac} writing * ifnames: (autoconf)ifnames Invocation. Listing conditionals in source. * autoconf-invocation: (autoconf)autoconf Invocation. How to create configuration scripts * autoreconf: (autoconf)autoreconf Invocation. Remaking multiple @command{configure} scripts * autoheader: (autoconf)autoheader Invocation. How to create configuration templates * autom4te: (autoconf)autom4te Invocation. The Autoconf executables backbone * configure: (autoconf)configure Invocation. Configuring a package. * autoupdate: (autoconf)autoupdate Invocation. Automatic update of @file{configure.ac} * config.status: (autoconf)config.status Invocation. 
Recreating configurations. * testsuite: (autoconf)testsuite Invocation. Running an Autotest test suite. @end direntry @titlepage @title Autoconf @subtitle Creating Automatic Configuration Scripts @subtitle for version @value{VERSION}, @value{UPDATED} @author David MacKenzie @author Ben Elliston @author Akim Demaille @page @vskip 0pt plus 1filll @insertcopying @end titlepage @contents @ifnottex @node Top @top Autoconf @insertcopying @end ifnottex @c The master menu, created with texinfo-master-menu, goes here. @menu * Introduction:: Autoconf's purpose, strengths, and weaknesses * The GNU Build System:: A set of tools for portable software packages * Making configure Scripts:: How to organize and produce Autoconf scripts * Setup:: Initialization and output * Existing Tests:: Macros that check for particular features * Writing Tests:: How to write new feature checks * Results:: What to do with results from feature checks * Programming in M4:: Layers on top of which Autoconf is written * Programming in M4sh:: Shell portability layer * Writing Autoconf Macros:: Adding new macros to Autoconf * Portable Shell:: Shell script portability pitfalls * Portable Make:: Makefile portability pitfalls * Portable C and C++:: C and C++ portability pitfalls * Manual Configuration:: Selecting features that can't be guessed * Site Configuration:: Local defaults for @command{configure} * Running configure Scripts:: How to use the Autoconf output * config.status Invocation:: Recreating a configuration * Obsolete Constructs:: Kept for backward compatibility * Using Autotest:: Creating portable test suites * FAQ:: Frequent Autoconf Questions, with answers * History:: History of Autoconf * GNU Free Documentation License:: License for copying this manual * Indices:: Indices of symbols, concepts, etc. 
@detailmenu --- The Detailed Node Listing --- The GNU Build System * Automake:: Escaping makefile hell * Gnulib:: The GNU portability library * Libtool:: Building libraries portably * Pointers:: More info on the GNU build system Making @command{configure} Scripts * Writing Autoconf Input:: What to put in an Autoconf input file * autoscan Invocation:: Semi-automatic @file{configure.ac} writing * ifnames Invocation:: Listing the conditionals in source code * autoconf Invocation:: How to create configuration scripts * autoreconf Invocation:: Remaking multiple @command{configure} scripts Writing @file{configure.ac} * Shell Script Compiler:: Autoconf as solution of a problem * Autoconf Language:: Programming in Autoconf * Autoconf Input Layout:: Standard organization of @file{configure.ac} Initialization and Output Files * Initializing configure:: Option processing etc. * Versioning:: Dealing with Autoconf versions * Notices:: Copyright, version numbers in @command{configure} * Input:: Where Autoconf should find files * Output:: Outputting results from the configuration * Configuration Actions:: Preparing the output based on results * Configuration Files:: Creating output files * Makefile Substitutions:: Using output variables in makefiles * Configuration Headers:: Creating a configuration header file * Configuration Commands:: Running arbitrary instantiation commands * Configuration Links:: Links depending on the configuration * Subdirectories:: Configuring independent packages together * Default Prefix:: Changing the default installation prefix Substitutions in Makefiles * Preset Output Variables:: Output variables that are always set * Installation Directory Variables:: Other preset output variables * Changed Directory Variables:: Warnings about @file{datarootdir} * Build Directories:: Supporting multiple concurrent compiles * Automatic Remaking:: Makefile rules for configuring Configuration Header Files * Header Templates:: Input for the configuration headers * 
autoheader Invocation:: How to create configuration templates * Autoheader Macros:: How to specify CPP templates Existing Tests * Common Behavior:: Macros' standard schemes * Alternative Programs:: Selecting between alternative programs * Files:: Checking for the existence of files * Libraries:: Library archives that might be missing * Library Functions:: C library functions that might be missing * Header Files:: Header files that might be missing * Declarations:: Declarations that may be missing * Structures:: Structures or members that might be missing * Types:: Types that might be missing * Compilers and Preprocessors:: Checking for compiling programs * System Services:: Operating system services * C and Posix Variants:: Kludges for C and Posix variants * Erlang Libraries:: Checking for the existence of Erlang libraries Common Behavior * Standard Symbols:: Symbols defined by the macros * Default Includes:: Includes used by the generic macros Alternative Programs * Particular Programs:: Special handling to find certain programs * Generic Programs:: How to find other programs Library Functions * Function Portability:: Pitfalls with usual functions * Particular Functions:: Special handling to find certain functions * Generic Functions:: How to find other functions Header Files * Header Portability:: Collected knowledge on common headers * Particular Headers:: Special handling to find certain headers * Generic Headers:: How to find other headers Declarations * Particular Declarations:: Macros to check for certain declarations * Generic Declarations:: How to find other declarations Structures * Particular Structures:: Macros to check for certain structure members * Generic Structures:: How to find other structure members Types * Particular Types:: Special handling to find certain types * Generic Types:: How to find other types Compilers and Preprocessors * Specific Compiler Characteristics:: Some portability issues * Generic Compiler Characteristics:: Language 
independent tests and features * C Compiler:: Checking its characteristics * C++ Compiler:: Likewise * Objective C Compiler:: Likewise * Objective C++ Compiler:: Likewise * Erlang Compiler and Interpreter:: Likewise * Fortran Compiler:: Likewise * Go Compiler:: Likewise Writing Tests * Language Choice:: Selecting which language to use for testing * Writing Test Programs:: Forging source files for compilers * Running the Preprocessor:: Detecting preprocessor symbols * Running the Compiler:: Detecting language or header features * Running the Linker:: Detecting library features * Runtime:: Testing for runtime features * Systemology:: A zoology of operating systems * Multiple Cases:: Tests for several possible values Writing Test Programs * Guidelines:: General rules for writing test programs * Test Functions:: Avoiding pitfalls in test programs * Generating Sources:: Source program boilerplate Results of Tests * Defining Symbols:: Defining C preprocessor symbols * Setting Output Variables:: Replacing variables in output files * Special Chars in Variables:: Characters to beware of in variables * Caching Results:: Speeding up subsequent @command{configure} runs * Printing Messages:: Notifying @command{configure} users Caching Results * Cache Variable Names:: Shell variables used in caches * Cache Files:: Files @command{configure} uses for caching * Cache Checkpointing:: Loading and saving the cache file Programming in M4 * M4 Quotation:: Protecting macros from unwanted expansion * Using autom4te:: The Autoconf executables backbone * Programming in M4sugar:: Convenient pure M4 macros * Debugging via autom4te:: Figuring out what M4 was doing M4 Quotation * Active Characters:: Characters that change the behavior of M4 * One Macro Call:: Quotation and one macro call * Quoting and Parameters:: M4 vs. 
shell parameters * Quotation and Nested Macros:: Macros calling macros * Changequote is Evil:: Worse than INTERCAL: M4 + changequote * Quadrigraphs:: Another way to escape special characters * Balancing Parentheses:: Dealing with unbalanced parentheses * Quotation Rule Of Thumb:: One parenthesis, one quote Using @command{autom4te} * autom4te Invocation:: A GNU M4 wrapper * Customizing autom4te:: Customizing the Autoconf package Programming in M4sugar * Redefined M4 Macros:: M4 builtins changed in M4sugar * Diagnostic Macros:: Diagnostic messages from M4sugar * Diversion support:: Diversions in M4sugar * Conditional constructs:: Conditions in M4 * Looping constructs:: Iteration in M4 * Evaluation Macros:: More quotation and evaluation control * Text processing Macros:: String manipulation in M4 * Number processing Macros:: Arithmetic computation in M4 * Set manipulation Macros:: Set manipulation in M4 * Forbidden Patterns:: Catching unexpanded macros Programming in M4sh * Common Shell Constructs:: Portability layer for common shell constructs * Polymorphic Variables:: Support for indirect variable names * Initialization Macros:: Macros to establish a sane shell environment * File Descriptor Macros:: File descriptor macros for input and output Writing Autoconf Macros * Macro Definitions:: Basic format of an Autoconf macro * Macro Names:: What to call your new macros * Dependencies Between Macros:: What to do when macros depend on other macros * Obsoleting Macros:: Warning about old ways of doing things * Coding Style:: Writing Autoconf macros @`a la Autoconf Dependencies Between Macros * Prerequisite Macros:: Ensuring required information * Suggested Ordering:: Warning about possible ordering problems * One-Shot Macros:: Ensuring a macro is called only once Portable Shell Programming * Shellology:: A zoology of shells * Invoking the Shell:: Invoking the shell as a command * Here-Documents:: Quirks and tricks * File Descriptors:: FDs and redirections * Signal 
Handling:: Shells, signals, and headaches * File System Conventions:: File names * Shell Pattern Matching:: Pattern matching * Shell Substitutions:: Variable and command expansions * Assignments:: Varying side effects of assignments * Parentheses:: Parentheses in shell scripts * Slashes:: Slashes in shell scripts * Special Shell Variables:: Variables you should not change * Shell Functions:: What to look out for if you use them * Limitations of Builtins:: Portable use of not so portable /bin/sh * Limitations of Usual Tools:: Portable use of portable tools Portable Make Programming * $< in Ordinary Make Rules:: $< in ordinary rules * Failure in Make Rules:: Failing portably in rules * Special Chars in Names:: Special Characters in Macro Names * Backslash-Newline-Empty:: Empty lines after backslash-newline * Backslash-Newline Comments:: Spanning comments across line boundaries * Long Lines in Makefiles:: Line length limitations * Macros and Submakes:: @code{make macro=value} and submakes * The Make Macro MAKEFLAGS:: @code{$(MAKEFLAGS)} portability issues * The Make Macro SHELL:: @code{$(SHELL)} portability issues * Parallel Make:: Parallel @command{make} quirks * Comments in Make Rules:: Other problems with Make comments * Newlines in Make Rules:: Using literal newlines in rules * Comments in Make Macros:: Other problems with Make comments in macros * Trailing whitespace in Make Macros:: Macro substitution problems * Command-line Macros and whitespace:: Whitespace trimming of values * obj/ and Make:: Don't name a subdirectory @file{obj} * make -k Status:: Exit status of @samp{make -k} * VPATH and Make:: @code{VPATH} woes * Single Suffix Rules:: Single suffix rules and separated dependencies * Timestamps and Make:: Sub-second timestamp resolution @code{VPATH} and Make * Variables listed in VPATH:: @code{VPATH} must be literal on ancient hosts * VPATH and Double-colon:: Problems with @samp{::} on ancient hosts * $< in Explicit Rules:: @code{$<} does not work in 
ordinary rules * Automatic Rule Rewriting:: @code{VPATH} goes wild on Solaris * Tru64 Directory Magic:: @command{mkdir} goes wild on Tru64 * Make Target Lookup:: More details about @code{VPATH} lookup Portable C and C++ Programming * Varieties of Unportability:: How to make your programs unportable * Integer Overflow:: When integers get too large * Preprocessor Arithmetic:: @code{#if} expression problems * Null Pointers:: Properties of null pointers * Buffer Overruns:: Subscript errors and the like * Volatile Objects:: @code{volatile} and signals * Floating Point Portability:: Portable floating-point arithmetic * Exiting Portably:: Exiting and the exit status Integer Overflow * Integer Overflow Basics:: Why integer overflow is a problem * Signed Overflow Examples:: Examples of code assuming wraparound * Optimization and Wraparound:: Optimizations that break uses of wraparound * Signed Overflow Advice:: Practical advice for signed overflow issues * Signed Integer Division:: @code{INT_MIN / -1} and @code{INT_MIN % -1} Manual Configuration * Specifying Target Triplets:: Specifying target triplets * Canonicalizing:: Getting the canonical system type * Using System Type:: What to do with the system type Site Configuration * Help Formatting:: Customizing @samp{configure --help} * External Software:: Working with other optional software * Package Options:: Selecting optional features * Pretty Help Strings:: Formatting help string * Option Checking:: Controlling checking of @command{configure} options * Site Details:: Configuring site details * Transforming Names:: Changing program names when installing * Site Defaults:: Giving @command{configure} local defaults Transforming Program Names When Installing * Transformation Options:: @command{configure} options to transform names * Transformation Examples:: Sample uses of transforming names * Transformation Rules:: Makefile uses of transforming names Running @command{configure} Scripts * Basic Installation:: Instructions for 
typical cases * Compilers and Options:: Selecting compilers and optimization * Multiple Architectures:: Compiling for multiple architectures at once * Installation Names:: Installing in different directories * Optional Features:: Selecting optional features * Particular Systems:: Particular systems * System Type:: Specifying the system type * Sharing Defaults:: Setting site-wide defaults for @command{configure} * Defining Variables:: Specifying the compiler etc. * configure Invocation:: Changing how @command{configure} runs Obsolete Constructs * Obsolete config.status Use:: Obsolete convention for @command{config.status} * acconfig Header:: Additional entries in @file{config.h.in} * autoupdate Invocation:: Automatic update of @file{configure.ac} * Obsolete Macros:: Backward compatibility macros * Autoconf 1:: Tips for upgrading your files * Autoconf 2.13:: Some fresher tips Upgrading From Version 1 * Changed File Names:: Files you might rename * Changed Makefiles:: New things to put in @file{Makefile.in} * Changed Macros:: Macro calls you might replace * Changed Results:: Changes in how to check test results * Changed Macro Writing:: Better ways to write your own macros Upgrading From Version 2.13 * Changed Quotation:: Broken code which used to work * New Macros:: Interaction with foreign macros * Hosts and Cross-Compilation:: Bugward compatibility kludges * AC_LIBOBJ vs LIBOBJS:: LIBOBJS is a forbidden token * AC_ACT_IFELSE vs AC_TRY_ACT:: A more generic scheme for testing sources Generating Test Suites with Autotest * Using an Autotest Test Suite:: Autotest and the user * Writing Testsuites:: Autotest macros * testsuite Invocation:: Running @command{testsuite} scripts * Making testsuite Scripts:: Using autom4te to create @command{testsuite} Using an Autotest Test Suite * testsuite Scripts:: The concepts of Autotest * Autotest Logs:: Their contents Frequent Autoconf Questions, with answers * Distributing:: Distributing @command{configure} scripts * Why GNU M4:: 
Why not use the standard M4? * Bootstrapping:: Autoconf and GNU M4 require each other? * Why Not Imake:: Why GNU uses @command{configure} instead of Imake * Defining Directories:: Passing @code{datadir} to program * Autom4te Cache:: What is it? Can I remove it? * Present But Cannot Be Compiled:: Compiler and Preprocessor Disagree * Expanded Before Required:: Expanded Before Required * Debugging:: Debugging @command{configure} scripts History of Autoconf * Genesis:: Prehistory and naming of @command{configure} * Exodus:: The plagues of M4 and Perl * Leviticus:: The priestly code of portability arrives * Numbers:: Growth and contributors * Deuteronomy:: Approaching the promises of easy configuration Indices * Environment Variable Index:: Index of environment variables used * Output Variable Index:: Index of variables set in output files * Preprocessor Symbol Index:: Index of C preprocessor symbols defined * Cache Variable Index:: Index of documented cache variables * Autoconf Macro Index:: Index of Autoconf macros * M4 Macro Index:: Index of M4, M4sugar, and M4sh macros * Autotest Macro Index:: Index of Autotest macros * Program & Function Index:: Index of those with portability problems * Concept Index:: General index @end detailmenu @end menu @c ============================================================= Introduction. @node Introduction @chapter Introduction @cindex Introduction @flushright A physicist, an engineer, and a computer scientist were discussing the nature of God. 
``Surely a Physicist,'' said the physicist, ``because early in the Creation, God made Light; and you know, Maxwell's equations, the dual nature of electromagnetic waves, the relativistic consequences@enddots{}'' ``An Engineer!,'' said the engineer, ``because before making Light, God split the Chaos into Land and Water; it takes a hell of an engineer to handle that big amount of mud, and orderly separation of solids from liquids@enddots{}'' The computer scientist shouted: ``And the Chaos, where do you think it was coming from, hmm?'' ---Anonymous @end flushright @c (via Franc,ois Pinard) Autoconf is a tool for producing shell scripts that automatically configure software source code packages to adapt to many kinds of Posix-like systems. The configuration scripts produced by Autoconf are independent of Autoconf when they are run, so their users do not need to have Autoconf. The configuration scripts produced by Autoconf require no manual user intervention when run; they do not normally even need an argument specifying the system type. Instead, they individually test for the presence of each feature that the software package they are for might need. (Before each check, they print a one-line message stating what they are checking for, so the user doesn't get too bored while waiting for the script to finish.) As a result, they deal well with systems that are hybrids or customized from the more common Posix variants. There is no need to maintain files that list the features supported by each release of each variant of Posix. For each software package that Autoconf is used with, it creates a configuration script from a template file that lists the system features that the package needs or can use. After the shell code to recognize and respond to a system feature has been written, Autoconf allows it to be shared by many software packages that can use (or need) that feature. 
If it later turns out that the shell code needs adjustment for some reason, it needs to be changed in only one place; all of the configuration scripts can be regenerated automatically to take advantage of the updated code. @c "Those who do not understand Unix are condemned to reinvent it, poorly." @c --Henry Spencer, 1987 (see https://en.wikipedia.org/wiki/Unix_philosophy) Those who do not understand Autoconf are condemned to reinvent it, poorly. The primary goal of Autoconf is making the @emph{user's} life easier; making the @emph{maintainer's} life easier is only a secondary goal. Put another way, the primary goal is not to make the generation of @file{configure} automatic for package maintainers (although patches along that front are welcome, since package maintainers form the user base of Autoconf); rather, the goal is to make @file{configure} painless, portable, and predictable for the end user of each @dfn{autoconfiscated} package. And to this degree, Autoconf is highly successful at its goal---most complaints to the Autoconf list are about difficulties in writing Autoconf input, and not in the behavior of the resulting @file{configure}. Even packages that don't use Autoconf will generally provide a @file{configure} script, and the most common complaint about these alternative home-grown scripts is that they fail to meet one or more of the GNU Coding Standards (@pxref{Configuration, , , standards, The GNU Coding Standards}) that users have come to expect from Autoconf-generated @file{configure} scripts. The Metaconfig package is similar in purpose to Autoconf, but the scripts it produces require manual user intervention, which is quite inconvenient when configuring large source trees. Unlike Metaconfig scripts, Autoconf scripts can support cross-compiling, if some care is taken in writing them. 
Autoconf does not solve all problems related to making portable software packages---for a more complete solution, it should be used in concert with other GNU build tools like Automake and Libtool. These other tools take on jobs like the creation of a portable, recursive makefile with all of the standard targets, linking of shared libraries, and so on. @xref{The GNU Build System}, for more information. Autoconf imposes some restrictions on the names of macros used with @code{#if} in C programs (@pxref{Preprocessor Symbol Index}). Autoconf requires GNU M4 version 1.4.6 or later in order to generate the scripts. It uses features that some versions of M4, including GNU M4 1.3, do not have. Autoconf works better with GNU M4 version 1.4.14 or later, though this is not required. @xref{Autoconf 1}, for information about upgrading from version 1. @xref{History}, for the story of Autoconf's development. @xref{FAQ}, for answers to some common questions about Autoconf. See the @uref{https://@/www.gnu.org/@/software/@/autoconf/, Autoconf web page} for up-to-date information, details on the mailing lists, pointers to a list of known bugs, etc. Mail suggestions to @email{autoconf@@gnu.org, the Autoconf mailing list}. Past suggestions are @uref{https://@/lists.gnu.org/@/archive/@/html/@/autoconf/, archived}. Mail bug reports to @email{bug-autoconf@@gnu.org, the Autoconf Bugs mailing list}. Past bug reports are @uref{https://@/lists.gnu.org/@/archive/@/html/@/bug-autoconf/, archived}. If possible, first check that your bug is not already solved in current development versions, and that it has not been reported yet. Be sure to include all the needed information and a short @file{configure.ac} that demonstrates the problem. Autoconf's development tree is accessible via @command{git}; see the @uref{https://@/savannah.gnu.org/@/projects/@/autoconf/, Autoconf Summary} for details, or view @uref{https://@/git.savannah.gnu.org/@/cgit/@/autoconf.git, the actual repository}. 
Patches relative to the current @command{git} version can be sent for review to the @email{autoconf-patches@@gnu.org, Autoconf Patches mailing list}, with discussion on prior patches @uref{https://@/lists.gnu.org/@/archive/@/html/@/autoconf-@/patches/, archived}; and all commits are posted in the read-only @email{autoconf-commit@@gnu.org, Autoconf Commit mailing list}, which is also @uref{https://@/lists.gnu.org/@/archive/@/html/@/autoconf-commit/, archived}. Because of its mission, the Autoconf package itself includes only a set of often-used macros that have already demonstrated their usefulness. Nevertheless, if you wish to share your macros, or find existing ones, see the @uref{https://@/www.gnu.org/@/software/@/autoconf-archive/, Autoconf Macro Archive}, which is kindly run by @email{simons@@cryp.to, Peter Simons}. @c ================================================= The GNU Build System @node The GNU Build System @chapter The GNU Build System @cindex GNU build system Autoconf solves an important problem---reliable discovery of system-specific build and runtime information---but this is only one piece of the puzzle for the development of portable software. To this end, the GNU project has developed a suite of integrated utilities to finish the job Autoconf started: the GNU build system, whose most important components are Autoconf, Automake, and Libtool. In this chapter, we introduce you to those tools, point you to sources of more information, and try to convince you to use the entire GNU build system for your software. @menu * Automake:: Escaping makefile hell * Gnulib:: The GNU portability library * Libtool:: Building libraries portably * Pointers:: More info on the GNU build system @end menu @node Automake @section Automake The ubiquity of @command{make} means that a makefile is almost the only viable way to distribute automatic build rules for software, but one quickly runs into its numerous limitations. 
Its lack of support for automatic dependency tracking, recursive builds in subdirectories, reliable timestamps (e.g., for network file systems), and so on, mean that developers must painfully (and often incorrectly) reinvent the wheel for each project. Portability is non-trivial, thanks to the quirks of @command{make} on many systems. On top of all this is the manual labor required to implement the many standard targets that users have come to expect (@code{make install}, @code{make distclean}, @code{make uninstall}, etc.). Since you are, of course, using Autoconf, you also have to insert repetitive code in your @file{Makefile.in} to recognize @code{@@CC@@}, @code{@@CFLAGS@@}, and other substitutions provided by @command{configure}. Into this mess steps @dfn{Automake}. @cindex Automake Automake allows you to specify your build needs in a @file{Makefile.am} file with a vastly simpler and more powerful syntax than that of a plain makefile, and then generates a portable @file{Makefile.in} for use with Autoconf. For example, the @file{Makefile.am} to build and install a simple ``Hello world'' program might look like: @example bin_PROGRAMS = hello hello_SOURCES = hello.c @end example @noindent The resulting @file{Makefile.in} (~400 lines) automatically supports all the standard targets, the substitutions provided by Autoconf, automatic dependency tracking, @code{VPATH} building, and so on. @command{make} builds the @code{hello} program, and @code{make install} installs it in @file{/usr/local/bin} (or whatever prefix was given to @command{configure}, if not @file{/usr/local}). The benefits of Automake increase for larger packages (especially ones with subdirectories), but even for small programs the added convenience and portability can be substantial. And that's not all@enddots{} @node Gnulib @section Gnulib GNU software has a well-deserved reputation for running on many different types of systems. 
While our primary goal is to write software for the GNU system, many users and developers have been introduced to us through the systems that they were already using. @cindex Gnulib Gnulib is a central location for common GNU code, intended to be shared among free software packages. Its components are typically shared at the source level, rather than being a library that gets built, installed, and linked against. The idea is to copy files from Gnulib into your own source tree. There is no distribution tarball; developers should just grab source modules from the repository. The source files are available online, under various licenses, mostly GNU GPL or GNU LGPL. Gnulib modules typically contain C source code along with Autoconf macros used to configure the source code. For example, the Gnulib @code{stdalign} module implements a @file{stdalign.h} header that nearly conforms to C11, even on old-fashioned hosts that lack @file{stdalign.h}. This module contains a source file for the replacement header, along with an Autoconf macro that arranges to use the replacement header on old-fashioned systems. For more information, consult the Gnulib website, @uref{https://@/www.gnu.org/@/software/@/gnulib/}. @node Libtool @section Libtool Often, one wants to build not only programs, but libraries, so that other programs can benefit from the fruits of your labor. Ideally, one would like to produce @emph{shared} (dynamically linked) libraries, which can be used by multiple programs without duplication on disk or in memory and can be updated independently of the linked programs. Producing shared libraries portably, however, is the stuff of nightmares---each system has its own incompatible tools, compiler flags, and magic incantations. Fortunately, GNU provides a solution: @dfn{Libtool}. @cindex Libtool Libtool handles all the requirements of building shared libraries for you, and at this time seems to be the @emph{only} way to do so with any portability. 
It also handles many other headaches, such as: the interaction of Make rules with the variable suffixes of shared libraries, linking reliably with shared libraries before they are installed by the superuser, and supplying a consistent versioning system (so that different versions of a library can be installed or upgraded without breaking binary compatibility). Although Libtool, like Autoconf, can be used without Automake, it is most simply utilized in conjunction with Automake---there, Libtool is used automatically whenever shared libraries are needed, and you need not know its syntax. @node Pointers @section Pointers Developers who are used to the simplicity of @command{make} for small projects on a single system might be daunted at the prospect of learning to use Automake and Autoconf. As your software is distributed to more and more users, however, you otherwise quickly find yourself putting lots of effort into reinventing the services that the GNU build tools provide, and making the same mistakes that they once made and overcame. (Besides, since you're already learning Autoconf, Automake is a piece of cake.) There are a number of places that you can go to for more information on the GNU build tools. @itemize @minus @item Web The project home pages for @uref{https://@/www@/.gnu@/.org/@/software/@/autoconf/, Autoconf}, @uref{https://@/www@/.gnu@/.org/@/software/@/automake/, Automake}, @uref{https://@/www@/.gnu@/.org/@/software/@/gnulib/, Gnulib}, and @uref{https://@/www@/.gnu@/.org/@/software/@/libtool/, Libtool}. @item Automake Manual @xref{Top, , Automake, automake, GNU Automake}, for more information on Automake. @item Books The book @cite{GNU Autoconf, Automake and Libtool}@footnote{@cite{GNU Autoconf, Automake and Libtool}, by G. V. Vaughan, B. Elliston, T. Tromey, and I. L. Taylor. SAMS (originally New Riders), 2000, ISBN 1578701902.} describes the complete GNU build environment. 
You can also find @uref{https://@/www.sourceware.org/@/autobook/, the entire book on-line}. @end itemize @c ================================================= Making configure Scripts. @node Making configure Scripts @chapter Making @command{configure} Scripts @cindex @file{aclocal.m4} @cindex @command{configure} The configuration scripts that Autoconf produces are by convention called @command{configure}. When run, @command{configure} creates several files, replacing configuration parameters in them with appropriate values. The files that @command{configure} creates are: @itemize @minus @item one or more @file{Makefile} files, usually one in each subdirectory of the package (@pxref{Makefile Substitutions}); @item optionally, a C header file, the name of which is configurable, containing @code{#define} directives (@pxref{Configuration Headers}); @item a shell script called @file{config.status} that, when run, recreates the files listed above (@pxref{config.status Invocation}); @item an optional shell script normally called @file{config.cache} (created when using @samp{configure --config-cache}) that saves the results of running many of the tests (@pxref{Cache Files}); @item a file called @file{config.log} containing any messages produced by compilers, to help debugging if @command{configure} makes a mistake. @end itemize @cindex @file{configure.ac} To create a @command{configure} script with Autoconf, you need to write an Autoconf input file @file{configure.ac} and run @command{autoconf} on it. If you write your own feature tests to supplement those that come with Autoconf, you might also write files called @file{aclocal.m4} and @file{acsite.m4}. If you use a C header file to contain @code{#define} directives, you might also run @command{autoheader}, and you can distribute the generated file @file{config.h.in} with the package. Here is a diagram showing how the files that can be used in configuration are produced. Programs that are executed are suffixed by @samp{*}. 
Optional files are enclosed in square brackets (@samp{[]}). @command{autoconf} and @command{autoheader} also read the installed Autoconf macro files (by reading @file{autoconf.m4}). @noindent Files used in preparing a software package for distribution, when using just Autoconf: @example your source files --> [autoscan*] --> [configure.scan] --> configure.ac @group configure.ac --. | .------> autoconf* -----> configure [aclocal.m4] --+---+ | `-----> [autoheader*] --> [config.h.in] [acsite.m4] ---' @end group Makefile.in @end example @noindent Additionally, if you use Automake, the following additional productions come into play: @example @group [acinclude.m4] --. | [local macros] --+--> aclocal* --> aclocal.m4 | configure.ac ----' @end group @group configure.ac --. +--> automake* --> Makefile.in Makefile.am ---' @end group @end example @noindent Files used in configuring a software package: @example @group .-------------> [config.cache] configure* ------------+-------------> config.log | [config.h.in] -. v .-> [config.h] -. +--> config.status* -+ +--> make* Makefile.in ---' `-> Makefile ---' @end group @end example @menu * Writing Autoconf Input:: What to put in an Autoconf input file * autoscan Invocation:: Semi-automatic @file{configure.ac} writing * ifnames Invocation:: Listing the conditionals in source code * autoconf Invocation:: How to create configuration scripts * autoreconf Invocation:: Remaking multiple @command{configure} scripts @end menu @node Writing Autoconf Input @section Writing @file{configure.ac} To produce a @command{configure} script for a software package, create a file called @file{configure.ac} that contains invocations of the Autoconf macros that test the system features your package needs or can use. Autoconf macros already exist to check for many features; see @ref{Existing Tests}, for their descriptions. 
For most other features, you can use Autoconf template macros to produce custom checks; see @ref{Writing Tests}, for information about them. For especially tricky or specialized features, @file{configure.ac} might need to contain some hand-crafted shell commands; see @ref{Portable Shell, , Portable Shell Programming}. The @command{autoscan} program can give you a good start in writing @file{configure.ac} (@pxref{autoscan Invocation}, for more information). @cindex @file{configure.in} Previous versions of Autoconf promoted the name @file{configure.in}, which is somewhat ambiguous (the tool needed to process this file is not described by its extension), and introduces a slight confusion with @file{config.h.in} and so on (for which @samp{.in} means ``to be processed by @command{configure}''). Using @file{configure.ac} is now preferred, while the use of @file{configure.in} will cause warnings from @command{autoconf}. @menu * Shell Script Compiler:: Autoconf as solution of a problem * Autoconf Language:: Programming in Autoconf * Autoconf Input Layout:: Standard organization of @file{configure.ac} @end menu @node Shell Script Compiler @subsection A Shell Script Compiler Just as for any other computer language, in order to properly program @file{configure.ac} in Autoconf you must understand @emph{what} problem the language tries to address and @emph{how} it does so. The problem Autoconf addresses is that the world is a mess. After all, you are using Autoconf in order to have your package compile easily on all sorts of different systems, some of them being extremely hostile. Autoconf itself bears the price for these differences: @command{configure} must run on all those systems, and thus @command{configure} must limit itself to their lowest common denominator of features. Naturally, you might then think of shell scripts; who needs @command{autoconf}? A set of properly written shell functions is enough to make it easy to write @command{configure} scripts by hand. Sigh! 
Unfortunately, even in 2008, where shells without any function support are few and far between, there are pitfalls to avoid when making use of them.
@example AC_INIT ([oops], [1.0]) # incorrect AC_INIT([hello], [1.0]) # good @end example Arguments should be enclosed within the quote characters @samp{[} and @samp{]}, and be separated by commas. Any leading blanks or newlines in arguments are ignored, unless they are quoted. You should always quote an argument that might contain a macro name, comma, parenthesis, or a leading blank or newline. This rule applies recursively for every macro call, including macros called from other macros. For more details on quoting rules, see @ref{Programming in M4}. For instance: @example AC_CHECK_HEADER([stdio.h], [AC_DEFINE([HAVE_STDIO_H], [1], [Define to 1 if you have <stdio.h>.])], [AC_MSG_ERROR([sorry, can't do anything for you])]) @end example @noindent is quoted properly. You may safely simplify its quotation to: @example AC_CHECK_HEADER([stdio.h], [AC_DEFINE([HAVE_STDIO_H], 1, [Define to 1 if you have <stdio.h>.])], [AC_MSG_ERROR([sorry, can't do anything for you])]) @end example @noindent because @samp{1} cannot contain a macro call. Here, the argument of @code{AC_MSG_ERROR} must be quoted; otherwise, its comma would be interpreted as an argument separator. Also, the second and third arguments of @samp{AC_CHECK_HEADER} must be quoted, since they contain macro calls. The three arguments @samp{HAVE_STDIO_H}, @samp{stdio.h}, and @samp{Define to 1 if you have <stdio.h>.} do not need quoting, but if you unwisely defined a macro with a name like @samp{Define} or @samp{stdio} then they would need quoting. Cautious Autoconf users would keep the quotes, but many Autoconf users find such precautions annoying, and would rewrite the example as follows: @example AC_CHECK_HEADER(stdio.h, [AC_DEFINE(HAVE_STDIO_H, 1, [Define to 1 if you have <stdio.h>.])], [AC_MSG_ERROR([sorry, can't do anything for you])]) @end example @noindent This is safe, so long as you adopt good naming conventions and do not define macros with names like @samp{HAVE_STDIO_H}, @samp{stdio}, or @samp{h}. 
Though it is also safe here to omit the quotes around @samp{Define to 1 if you have <stdio.h>.}, this is not recommended, as message strings are more likely to inadvertently contain commas.
In general, then, it is a good idea to @emph{use double quoting for all literal string arguments}, either around just the problematic portions, or over the entire argument: @example m4_pattern_allow([AC_DC]) AC_MSG_WARN([[AC_DC] stinks --Iron Maiden]) AC_MSG_WARN([[AC_DC stinks --Iron Maiden]]) @end example It is also possible to avoid the problematic patterns in the first place, by the use of additional escaping (either a quadrigraph, or creative shell constructs), in which case it is no longer necessary to use @code{m4_pattern_allow}: @example echo "Hard rock was here! --AC""_DC" AC_MSG_WARN([[AC@@&t@@_DC stinks --Iron Maiden]]) @end example You are now able to understand one of the constructs of Autoconf that has been continually misunderstood@enddots{} The rule of thumb is that @emph{whenever you expect macro expansion, expect quote expansion}; i.e., expect one level of quotes to be lost. For instance: @example AC_COMPILE_IFELSE(AC_LANG_SOURCE([char b[10];]), [], [AC_MSG_ERROR([you lose])]) @end example @noindent is incorrect: here, the first argument of @code{AC_LANG_SOURCE} is @samp{char b[10];} and is expanded once, which results in @samp{char b10;}; and the @code{AC_LANG_SOURCE} is also expanded prior to being passed to @code{AC_COMPILE_IFELSE}. (There was an idiom common in Autoconf's past to address this issue via the M4 @code{changequote} primitive, but do not use it!) Let's take a closer look: the author meant the first argument to be understood as a literal, and therefore it must be quoted twice; likewise, the intermediate @code{AC_LANG_SOURCE} macro should be quoted once so that it is only expanded after the rest of the body of @code{AC_COMPILE_IFELSE} is in place: @example AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char b[10];]])], [], [AC_MSG_ERROR([you lose])]) @end example @noindent Voil@`a, you actually produce @samp{char b[10];} this time! 
On the other hand, descriptions (e.g., the last parameter of @code{AC_DEFINE} or @code{AS_HELP_STRING}) are not literals---they are subject to line breaking, for example---and should not be double quoted. Even if these descriptions are short and are not actually broken, double quoting them yields weird results. Some macros take optional arguments, which this documentation represents as @ovar{arg} (not to be confused with the quote characters). You may just leave them empty, or use @samp{[]} to make the emptiness of the argument explicit, or you may simply omit the trailing commas. The three lines below are equivalent: @example AC_CHECK_HEADERS([stdio.h], [], [], []) AC_CHECK_HEADERS([stdio.h],,,) AC_CHECK_HEADERS([stdio.h]) @end example It is best to put each macro call on its own line in @file{configure.ac}. Most of the macros don't add extra newlines; they rely on the newline after the macro call to terminate the commands. This approach makes the generated @command{configure} script a little easier to read by not inserting lots of blank lines. It is generally safe to set shell variables on the same line as a macro call, because the shell allows assignments without intervening newlines. You can include comments in @file{configure.ac} files by starting them with the @samp{#}. For example, it is helpful to begin @file{configure.ac} files with a line like this: @example # Process this file with autoconf to produce a configure script. @end example @node Autoconf Input Layout @subsection Standard @file{configure.ac} Layout The order in which @file{configure.ac} calls the Autoconf macros is not important, with a few exceptions. Every @file{configure.ac} must contain a call to @code{AC_INIT} before the checks, and a call to @code{AC_OUTPUT} at the end (@pxref{Output}). Additionally, some macros rely on other macros having been called first, because they check previously set values of some variables to decide what to do. 
These macros are noted in the individual descriptions (@pxref{Existing Tests}), and they also warn you when @command{configure} is created if they are called out of order. To encourage consistency, here is a suggested order for calling the Autoconf macros. Generally speaking, the things near the end of this list are those that could depend on things earlier in it. For example, library functions could be affected by types and libraries. @display @group Autoconf requirements @code{AC_INIT(@var{package}, @var{version}, @var{bug-report-address})} information on the package checks for programs checks for libraries checks for header files checks for types checks for structures checks for compiler characteristics checks for library functions checks for system services @code{AC_CONFIG_FILES(@r{[}@var{file@dots{}}@r{]})} @code{AC_OUTPUT} @end group @end display @node autoscan Invocation @section Using @command{autoscan} to Create @file{configure.ac} @cindex @command{autoscan} The @command{autoscan} program can help you create and/or maintain a @file{configure.ac} file for a software package. @command{autoscan} examines source files in the directory tree rooted at a directory given as a command line argument, or the current directory if none is given. It searches the source files for common portability problems and creates a file @file{configure.scan} which is a preliminary @file{configure.ac} for that package, and checks a possibly existing @file{configure.ac} for completeness. When using @command{autoscan} to create a @file{configure.ac}, you should manually examine @file{configure.scan} before renaming it to @file{configure.ac}; it probably needs some adjustments. Occasionally, @command{autoscan} outputs a macro in the wrong order relative to another macro, so that @command{autoconf} produces a warning; you need to move such macros manually. 
Also, if you want the package to use a configuration header file, you must add a call to @code{AC_CONFIG_HEADERS} (@pxref{Configuration Headers}). You might also have to change or add some @code{#if} directives to your program in order to make it work with Autoconf (@pxref{ifnames Invocation}, for information about a program that can help with that job). When using @command{autoscan} to maintain a @file{configure.ac}, simply consider adding its suggestions. The file @file{autoscan.log} contains detailed information on why a macro is requested. @command{autoscan} uses several data files (installed along with Autoconf) to determine which macros to output when it finds particular symbols in a package's source files. These data files all have the same format: each line consists of a symbol, one or more blanks, and the Autoconf macro to output if that symbol is encountered. Lines starting with @samp{#} are comments. @command{autoscan} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Print the names of the files it examines and the potentially interesting symbols it finds in them. This output can be voluminous. @item --debug @itemx -d Don't remove temporary files. @item --include=@var{dir} @itemx -I @var{dir} Append @var{dir} to the include path. Multiple invocations accumulate. @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend @var{dir} to the include path. Multiple invocations accumulate. @end table @node ifnames Invocation @section Using @command{ifnames} to List Conditionals @cindex @command{ifnames} @command{ifnames} can help you write @file{configure.ac} for a software package. It prints the identifiers that the package already uses in C preprocessor conditionals. 
If a package has already been set up to have some portability, @command{ifnames} can thus help you figure out what its @command{configure} needs to check for. It may help fill in some gaps in a @file{configure.ac} generated by @command{autoscan} (@pxref{autoscan Invocation}). @command{ifnames} scans all of the C source files named on the command line (or the standard input, if none are given) and writes to the standard output a sorted list of all the identifiers that appear in those files in @code{#if}, @code{#elif}, @code{#ifdef}, or @code{#ifndef} directives. It prints each identifier on a line, followed by a space-separated list of the files in which that identifier occurs. @noindent @command{ifnames} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @end table @node autoconf Invocation @section Using @command{autoconf} to Create @command{configure} @cindex @command{autoconf} To create @command{configure} from @file{configure.ac}, run the @command{autoconf} program with no arguments. @command{autoconf} processes @file{configure.ac} with the M4 macro processor, using the Autoconf macros. If you give @command{autoconf} an argument, it reads that file instead of @file{configure.ac} and writes the configuration script to the standard output instead of to @command{configure}. If you give @command{autoconf} the argument @option{-}, it reads from the standard input instead of @file{configure.ac} and writes the configuration script to the standard output. The Autoconf macros are defined in several files. Some of the files are distributed with Autoconf; @command{autoconf} reads them first. Then it looks for the optional file @file{acsite.m4} in the directory that contains the distributed Autoconf macro files, and for the optional file @file{aclocal.m4} in the current directory. 
Those files can contain your site's or the package's own Autoconf macro definitions (@pxref{Writing Autoconf Macros}, for more information). If a macro is defined in more than one of the files that @command{autoconf} reads, the last definition it reads overrides the earlier ones. @command{autoconf} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Report processing steps. @item --debug @itemx -d Don't remove the temporary files. @item --force @itemx -f Remake @file{configure} even if newer than its input files. @item --include=@var{dir} @itemx -I @var{dir} Append @var{dir} to the include path. Multiple invocations accumulate. @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend @var{dir} to the include path. Multiple invocations accumulate. @item --output=@var{file} @itemx -o @var{file} Save output (script or trace) to @var{file}. The file @option{-} stands for the standard output. @item --warnings=@var{category}[,@var{category}...] @itemx -W@var{category}[,@var{category}...] @evindex WARNINGS Enable or disable warnings related to each @var{category}. @xref{m4_warn}, for a comprehensive list of categories. Special values include: @table @samp @item all Enable all categories of warnings. @item none Disable all categories of warnings. @item error Treat all warnings as errors. @item no-@var{category} Disable warnings falling into @var{category}. @end table The environment variable @env{WARNINGS} may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of @option{--warnings}, but unknown categories are silently ignored. The command line takes precedence; for instance, if @env{WARNINGS} is set to @code{obsolete}, but @option{-Wnone} is given on the command line, no warnings will be issued. 
Some categories of warnings are on by default. Again, for details see @ref{m4_warn}. @item --trace=@var{macro}[:@var{format}] @itemx -t @var{macro}[:@var{format}] Do not create the @command{configure} script, but list the calls to @var{macro} according to the @var{format}. Multiple @option{--trace} arguments can be used to list several macros. Multiple @option{--trace} arguments for a single macro are not cumulative; instead, you should just make @var{format} as long as needed. The @var{format} is a regular string, with newlines if desired, and several special escape codes. It defaults to @samp{$f:$l:$n:$%}; see @ref{autom4te Invocation}, for details on the @var{format}. @item --initialization @itemx -i By default, @option{--trace} does not trace the initialization of the Autoconf macros (typically the @code{AC_DEFUN} definitions). This results in a noticeable speedup, but can be disabled by this option. @end table It is often necessary to check the content of a @file{configure.ac} file, but parsing it yourself is extremely fragile and error-prone. It is suggested that you rely upon @option{--trace} to scan @file{configure.ac}. For instance, to find the list of variables that are substituted, use: @example @group $ @kbd{autoconf -t AC_SUBST} configure.ac:2:AC_SUBST:ECHO_C configure.ac:2:AC_SUBST:ECHO_N configure.ac:2:AC_SUBST:ECHO_T @i{More traces deleted} @end group @end example @noindent The example below highlights the difference between @samp{$@@}, @samp{$*}, and @samp{$%}. 
@example @group $ @kbd{cat configure.ac} AC_DEFINE(This, is, [an [example]]) $ @kbd{autoconf -t 'AC_DEFINE:@@: $@@ *: $* %: $%'} @@: [This],[is],[an [example]] *: This,is,an [example] %: This:is:an [example] @end group @end example @noindent The @var{format} gives you a lot of freedom: @example @group $ @kbd{autoconf -t 'AC_SUBST:$$ac_subst@{"$1"@} = "$f:$l";'} $ac_subst@{"ECHO_C"@} = "configure.ac:2"; $ac_subst@{"ECHO_N"@} = "configure.ac:2"; $ac_subst@{"ECHO_T"@} = "configure.ac:2"; @i{More traces deleted} @end group @end example @noindent A long @var{separator} can be used to improve the readability of complex structures, and to ease their parsing (for instance when no single character is suitable as a separator): @example @group $ @kbd{autoconf -t 'AM_MISSING_PROG:$@{|:::::|@}*'} ACLOCAL|:::::|aclocal|:::::|$missing_dir AUTOCONF|:::::|autoconf|:::::|$missing_dir AUTOMAKE|:::::|automake|:::::|$missing_dir @i{More traces deleted} @end group @end example @node autoreconf Invocation @section Using @command{autoreconf} to Update @command{configure} Scripts @cindex @command{autoreconf} Installing the various components of the GNU Build System can be tedious: running @command{autopoint} for Gettext, @command{automake} for @file{Makefile.in} etc.@: in each directory. It may be needed either because some tools such as @command{automake} have been updated on your system, or because some of the sources such as @file{configure.ac} have been updated, or finally, simply in order to install the GNU Build System in a fresh tree. @command{autoreconf} runs @command{autoconf}, @command{autoheader}, @command{aclocal}, @command{automake}, @command{libtoolize}, @command{intltoolize}, @command{gtkdocize}, and @command{autopoint} (when appropriate) repeatedly to update the GNU Build System in the specified directories and their subdirectories (@pxref{Subdirectories}). By default, it only remakes those files that are older than their sources. 
The environment variables @env{AUTOM4TE}, @env{AUTOCONF}, @env{AUTOHEADER}, @env{AUTOMAKE}, @env{ACLOCAL}, @env{AUTOPOINT}, @env{LIBTOOLIZE}, @env{INTLTOOLIZE}, @env{GTKDOCIZE}, @env{M4}, and @env{MAKE} may be used to override the invocation of the respective tools. If you install a new version of some tool, you can make @command{autoreconf} remake @emph{all} of the files by giving it the @option{--force} option. @xref{Automatic Remaking}, for Make rules to automatically rebuild @command{configure} scripts when their source files change. That method handles the timestamps of configuration header templates properly, but does not pass @option{--autoconf-dir=@var{dir}} or @option{--localdir=@var{dir}}. @cindex Gettext @cindex @command{autopoint} Gettext supplies the @command{autopoint} command to add translation infrastructure to a source package. If you use @command{autopoint}, your @file{configure.ac} should invoke @code{AM_GNU_GETTEXT} and one of @code{AM_GNU_GETTEXT_VERSION(@var{gettext-version})} or @code{AM_GNU_GETTEXT_REQUIRE_VERSION(@var{min-gettext-version})}. @xref{autopoint Invocation, , Invoking the @code{autopoint} Program, gettext, GNU @code{gettext} utilities}, for further details. @noindent @command{autoreconf} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Print the name of each directory @command{autoreconf} examines and the commands it runs. If given two or more times, pass @option{--verbose} to subordinate tools that support it. @item --debug @itemx -d Don't remove the temporary files. @item --force @itemx -f Consider all generated and standard auxiliary files to be obsolete. This remakes even @file{configure} scripts and configuration headers that are newer than their input files (@file{configure.ac} and, if present, @file{aclocal.m4}). 
If deemed appropriate, this option triggers calls to @samp{automake --force-missing}. Passing both @option{--force} and @option{--install} to @command{autoreconf} will in turn undo any customizations to standard files. Note that the macro @code{AM_INIT_AUTOMAKE} has some options which change the set of files considered to be standard. @item --install @itemx -i Install any missing standard auxiliary files in the package. By default, files are copied; this can be changed with @option{--symlink}. If deemed appropriate, this option triggers calls to @samp{automake --add-missing}, @samp{libtoolize}, @samp{autopoint}, etc. @item --no-recursive Do not rebuild files in subdirectories to configure (see @ref{Subdirectories}, macro @code{AC_CONFIG_SUBDIRS}). @item --symlink @itemx -s When used with @option{--install}, install symbolic links to the missing auxiliary files instead of copying them. @item --make @itemx -m When the directories were configured, update the configuration by running @samp{./config.status --recheck && ./config.status}, and then run @samp{make}. @item --include=@var{dir} @itemx -I @var{dir} Append @var{dir} to the include path. Multiple invocations accumulate. Passed on to @command{aclocal}, @command{autoconf} and @command{autoheader} internally. @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend @var{dir} to the include path. Multiple invocations accumulate. Passed on to @command{autoconf} and @command{autoheader} internally. @item --warnings=@var{category}[,@var{category}...] @itemx -W@var{category}[,@var{category}...] @evindex WARNINGS Enable or disable warnings related to each @var{category}. @xref{m4_warn}, for a comprehensive list of categories. Special values include: @table @samp @item all Enable all categories of warnings. @item none Disable all categories of warnings. @item error Treat all warnings as errors. @item no-@var{category} Disable warnings falling into @var{category}. 
@end table The environment variable @env{WARNINGS} may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of @option{--warnings}, but unknown categories are silently ignored. The command line takes precedence; for instance, if @env{WARNINGS} is set to @code{obsolete}, but @option{-Wnone} is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see @ref{m4_warn}. @end table If you want @command{autoreconf} to pass flags that are not listed here on to @command{aclocal}, set @code{ACLOCAL_AMFLAGS} in your @file{Makefile.am}. Due to a limitation in the Autoconf implementation these flags currently must be set on a single line in @file{Makefile.am}, without any backslash-newlines. Also, be aware that future Automake releases might start flagging @code{ACLOCAL_AMFLAGS} as obsolescent, or even remove support for it. @c ========================================= Initialization and Output Files. @node Setup @chapter Initialization and Output Files Autoconf-generated @command{configure} scripts need some information about how to initialize, such as how to find the package's source files and about the output files to produce. The following sections describe the initialization and the creation of output files. @menu * Initializing configure:: Option processing etc. 
* Versioning:: Dealing with Autoconf versions * Notices:: Copyright, version numbers in @command{configure} * Input:: Where Autoconf should find files * Output:: Outputting results from the configuration * Configuration Actions:: Preparing the output based on results * Configuration Files:: Creating output files * Makefile Substitutions:: Using output variables in makefiles * Configuration Headers:: Creating a configuration header file * Configuration Commands:: Running arbitrary instantiation commands * Configuration Links:: Links depending on the configuration * Subdirectories:: Configuring independent packages together * Default Prefix:: Changing the default installation prefix @end menu @node Initializing configure @section Initializing @command{configure} Every @command{configure} script must call @code{AC_INIT} before doing anything else that produces output. Calls to silent macros, such as @code{AC_DEFUN}, may also occur prior to @code{AC_INIT}, although these are generally used via @file{aclocal.m4}, since that is implicitly included before the start of @file{configure.ac}. The only other required macro is @code{AC_OUTPUT} (@pxref{Output}). @anchor{AC_INIT} @defmac AC_INIT (@var{package}, @var{version}, @ovar{bug-report}, @ @ovar{tarname}, @ovar{url}) @acindex{INIT} Process any command-line arguments and perform initialization and verification. Set the name of the @var{package} and its @var{version}. These are typically used in @option{--version} support, including that of @command{configure}. The optional argument @var{bug-report} should be the email to which users should send bug reports. The package @var{tarname} differs from @var{package}: the latter designates the full package name (e.g., @samp{GNU Autoconf}), while the former is meant for distribution tar ball names (e.g., @samp{autoconf}). It defaults to @var{package} with @samp{GNU } stripped, lower-cased, and all characters other than alphanumerics and underscores are changed to @samp{-}. 
If provided, @var{url} should be the home page for the package. Leading and trailing whitespace is stripped from all the arguments to @code{AC_INIT}, and interior whitespace is collapsed to a single space. This means that, for instance, if you want to put several email addresses in @var{bug-report}, you can put each one on its own line: @smallexample @group # We keep having problems with the mail hosting for # gnomovision.example, so give people an alternative. AC_INIT([Gnomovision], [17.0.1], [ bugs@@gnomovision.example or gnomo-bugs@@reliable-email.example ]) @end group @end smallexample The arguments to @code{AC_INIT} may be computed by M4, when @command{autoconf} is run. For instance, if you want to include the package's version number in the @var{tarname}, but you don't want to repeat it, you can use a helper macro: @smallexample @group m4_define([gnomo_VERSION], [17.0.1]) AC_INIT([Gnomovision], m4_defn([gnomo_VERSION]), [bugs@@gnomovision.example], [gnomo-]m4_defn([gnomo_VERSION])) @end group @end smallexample This uses @code{m4_defn} to produce the expansion of @code{gnomo_VERSION} @emph{as a quoted string}, so that if there happen to be any more M4 macro names in @code{gnomo_VERSION}, they will not be expanded. @xref{Defn,,Renaming Macros,m4,GNU m4 macro processor}. Continuing this example, if you don't want to embed the version number in @file{configure.ac} at all, you can use @code{m4_esyscmd} to look it up somewhere else when @command{autoconf} is run: @smallexample @group m4_define([gnomo_VERSION], m4_esyscmd([build-aux/git-version-gen .tarball-version])) AC_INIT([Gnomovision], m4_defn([gnomo_VERSION]), [bugs@@gnomovision.example], [gnomo-]m4_defn([gnomo_VERSION])) @end group @end smallexample This uses the utility script @command{git-version-gen} to look up the package's version in its version control metadata. This script is part of Gnulib (@pxref{Gnulib}). The arguments to @code{AC_INIT} are written into @file{configure} in several different places. 
Therefore, we strongly recommend that you write any M4 logic in @code{AC_INIT} arguments to be evaluated @emph{before} @code{AC_INIT} itself is evaluated. For instance, in the above example, the second argument to @code{m4_define} is @emph{not} quoted, so the @code{m4_esyscmd} is evaluated only once, and @code{gnomo_VERSION} is defined to the output of the command. If the second argument to @code{m4_define} were quoted, @code{m4_esyscmd} would be evaluated each time the @var{version} or @var{tarname} arguments were written to @file{configure}, and the command would be run repeatedly. In some of the places where the arguments to @code{AC_INIT} are used, within @file{configure}, shell evaluation cannot happen. Therefore, the arguments to @code{AC_INIT} may @emph{not} be computed when @command{configure} is run. If they contain any construct that isn't always treated as literal by the shell (e.g.@: variable expansions), @command{autoconf} will issue an error. The @var{tarname} argument is used to construct filenames. It should not contain wildcard characters, white space, or anything else that could be troublesome as part of a file or directory name. Some of M4's active characters (notably parentheses, square brackets, @samp{,} and @samp{#}) commonly appear in URLs and lists of email addresses. If any of these characters appear in an argument to AC_INIT, that argument will probably need to be double-quoted to avoid errors and mistranscriptions. @xref{M4 Quotation}. The following M4 macros (e.g., @code{AC_PACKAGE_NAME}), output variables (e.g., @code{PACKAGE_NAME}), and preprocessor symbols (e.g., @code{PACKAGE_NAME}), are defined by @code{AC_INIT}: @table @asis @item @code{AC_PACKAGE_NAME}, @code{PACKAGE_NAME} @acindex{PACKAGE_NAME} @ovindex PACKAGE_NAME @cvindex PACKAGE_NAME Exactly @var{package}. 
@item @code{AC_PACKAGE_TARNAME}, @code{PACKAGE_TARNAME} @acindex{PACKAGE_TARNAME} @ovindex PACKAGE_TARNAME @cvindex PACKAGE_TARNAME Exactly @var{tarname}, possibly generated from @var{package}. @item @code{AC_PACKAGE_VERSION}, @code{PACKAGE_VERSION} @acindex{PACKAGE_VERSION} @ovindex PACKAGE_VERSION @cvindex PACKAGE_VERSION Exactly @var{version}. @item @code{AC_PACKAGE_STRING}, @code{PACKAGE_STRING} @acindex{PACKAGE_STRING} @ovindex PACKAGE_STRING @cvindex PACKAGE_STRING Exactly @samp{@var{package} @var{version}}. @item @code{AC_PACKAGE_BUGREPORT}, @code{PACKAGE_BUGREPORT} @acindex{PACKAGE_BUGREPORT} @ovindex PACKAGE_BUGREPORT @cvindex PACKAGE_BUGREPORT Exactly @var{bug-report}, if one was provided. Typically an email address, or URL to a bug management web page. @item @code{AC_PACKAGE_URL}, @code{PACKAGE_URL} @acindex{PACKAGE_URL} @ovindex PACKAGE_URL @cvindex PACKAGE_URL Exactly @var{url}, if one was provided. If @var{url} was empty, but @var{package} begins with @samp{GNU }, then this defaults to @samp{https://@/www.gnu.org/@/software/@/@var{tarname}/}, otherwise, no URL is assumed. @end table @end defmac If your @command{configure} script does its own option processing, it should inspect @samp{$@@} or @samp{$*} immediately after calling @code{AC_INIT}, because other Autoconf macros liberally use the @command{set} command to process strings, and this has the side effect of updating @samp{$@@} and @samp{$*}. However, we suggest that you use standard macros like @code{AC_ARG_ENABLE} instead of attempting to implement your own option processing. @xref{Site Configuration}. @node Versioning @section Dealing with Autoconf versions @cindex Autoconf version @cindex version, Autoconf The following optional macros can be used to help choose the minimum version of Autoconf that can successfully compile a given @file{configure.ac}. @defmac AC_PREREQ (@var{version}) @acindex{PREREQ} @cindex Version Ensure that a recent enough version of Autoconf is being used. 
If the version of Autoconf being used to create @command{configure} is earlier than @var{version}, print an error message to the standard error output and exit with failure (exit status is 63). For example: @example AC_PREREQ([@value{VERSION}]) @end example This macro may be used before @code{AC_INIT}. @end defmac @defmac AC_AUTOCONF_VERSION @acindex{AUTOCONF_VERSION} This macro was introduced in Autoconf 2.62. It identifies the version of Autoconf that is currently parsing the input file, in a format suitable for @code{m4_version_compare} (@pxref{m4_version_compare}); in other words, for this release of Autoconf, its value is @samp{@value{VERSION}}. One potential use of this macro is for writing conditional fallbacks based on when a feature was added to Autoconf, rather than using @code{AC_PREREQ} to require the newer version of Autoconf. However, remember that the Autoconf philosophy favors feature checks over version checks. You should not expand this macro directly; use @samp{m4_defn([AC_AUTOCONF_VERSION])} instead. This is because some users might have a beta version of Autoconf installed, with arbitrary letters included in its version string. This means it is possible for the version string to contain the name of a defined macro, such that expanding @code{AC_AUTOCONF_VERSION} would trigger the expansion of that macro during rescanning, and change the version string to be different than what you intended to check. @end defmac @node Notices @section Notices in @command{configure} @cindex Notices in @command{configure} The following macros manage version numbers for @command{configure} scripts. Using them is optional. @defmac AC_COPYRIGHT (@var{copyright-notice}) @acindex{COPYRIGHT} @cindex Copyright Notice State that, in addition to the Free Software Foundation's copyright on the Autoconf macros, parts of your @command{configure} are covered by the @var{copyright-notice}. 
The @var{copyright-notice} shows up in both the head of @command{configure} and in @samp{configure --version}. @end defmac @defmac AC_REVISION (@var{revision-info}) @acindex{REVISION} @cindex Revision Copy revision stamp @var{revision-info} into the @command{configure} script, with any dollar signs or double-quotes removed. This macro lets you put a revision stamp from @file{configure.ac} into @command{configure} without RCS or CVS changing it when you check in @command{configure}. That way, you can determine easily which revision of @file{configure.ac} a particular @command{configure} corresponds to. For example, this line in @file{configure.ac}: @c The @w prevents RCS from changing the example in the manual. @example AC_REVISION([@w{$}Revision: 1.30 $]) @end example @noindent produces this in @command{configure}: @example #!/bin/sh # From configure.ac Revision: 1.30 @end example @end defmac @node Input @section Configure Input: Source Code, Macros, and Auxiliary Files The following macros help you manage the contents of your source tree. @anchor{AC_CONFIG_SRCDIR} @defmac AC_CONFIG_SRCDIR (@var{unique-file-in-source-dir}) @acindex{CONFIG_SRCDIR} Distinguish this package's source directory from other source directories that might happen to exist in the file system. @var{unique-file-in-source-dir} should name a file that is unique to this package. @command{configure} will verify that this file exists in @file{@var{srcdir}}, before it runs any other checks. Use of this macro is strongly recommended. It protects against people accidentally specifying the wrong directory with @option{--srcdir}. @xref{configure Invocation}, for more information. @end defmac Packages that use @command{aclocal} to generate @file{aclocal.m4} should declare where local macros can be found using @code{AC_CONFIG_MACRO_DIRS}. @defmac AC_CONFIG_MACRO_DIRS (@var{dir1} [@var{dir2} ... 
@var{dirN}]) @defmacx AC_CONFIG_MACRO_DIR (@var{dir}) @acindex{CONFIG_MACRO_DIRS} @acindex{CONFIG_MACRO_DIR} @acindex{CONFIG_MACRO_DIR_TRACE} Specify the given directories as the location of additional local Autoconf macros. These macros are intended for use by commands like @command{autoreconf} or @command{aclocal} that trace macro calls; they should be called directly from @file{configure.ac} so that tools that install macros for @command{aclocal} can find the macros' declarations. Tools that want to learn which directories have been selected should trace @code{AC_CONFIG_MACRO_DIR_TRACE}, which will be called once per directory. AC_CONFIG_MACRO_DIRS is the preferred form, and can be called multiple times and with multiple arguments; in such cases, directories in earlier calls are expected to be searched before directories in later calls, and directories appearing in the same call are expected to be searched in the order in which they appear in the call. For historical reasons, the macro AC_CONFIG_MACRO_DIR can also be used once, if it appears first, for tools such as older @command{libtool} that weren't prepared to handle multiple directories. For example, a usage like @smallexample AC_CONFIG_MACRO_DIR([dir1]) AC_CONFIG_MACRO_DIRS([dir2]) AC_CONFIG_MACRO_DIRS([dir3 dir4]) @end smallexample will cause the trace of AC_CONFIG_MACRO_DIR_TRACE to appear four times, and should cause the directories to be searched in this order: @samp{dir1 dir2 dir3 dir4}. Note that if you use @command{aclocal} from an Automake release prior to 1.13 to generate @file{aclocal.m4}, you must also set @code{ACLOCAL_AMFLAGS = -I @var{dir1} [-I @var{dir2} ... -I @var{dirN}]} in your top-level @file{Makefile.am}. Due to a limitation in the Autoconf implementation of @command{autoreconf}, these include directives currently must be set on a single line in @file{Makefile.am}, without any backslash-newlines. 
@end defmac @prindex @command{config.guess} @prindex @command{config.sub} @prindex @command{install-sh} Some Autoconf macros require auxiliary scripts. @code{AC_PROG_INSTALL} and @code{AC_PROG_@w{MKDIR_P}} (@pxref{Particular Programs}) require a fallback implementation of @command{install} called @file{install-sh}, and the @code{AC_CANONICAL} macros (@pxref{Manual Configuration}) require the system-identification scripts @file{config.sub} and @file{config.guess}. Third-party tools, such as Automake and Libtool, may require additional auxiliary scripts. By default, @command{configure} looks for these scripts next to itself, in @file{@var{srcdir}}. For convenience when working with subdirectories with their own configure scripts (@pxref{Subdirectories}), if the scripts are not in @file{@var{srcdir}} it will also look in @file{@var{srcdir}/..} and @file{@var{srcdir}/../..}. All of the scripts must be found in the same directory. If these default locations are not adequate, or simply to reduce clutter at the top level of the source tree, packages can use @code{AC_CONFIG_AUX_DIR} to declare where to look for auxiliary scripts. @defmac AC_CONFIG_AUX_DIR (@var{dir}) @acindex{CONFIG_AUX_DIR} Look for auxiliary scripts in @var{dir}. Normally, @var{dir} should be a relative path, which is taken as relative to @file{@var{srcdir}}. If @var{dir} is an absolute path or contains shell variables, however, it is used as-is. When the goal of using @code{AC_CONFIG_AUX_DIR} is to reduce clutter at the top level of the source tree, the conventional name for @var{dir} is @file{build-aux}. If you need portability to DOS variants, do not name the auxiliary directory @file{aux}. @xref{File System Conventions}. @end defmac @defmac AC_REQUIRE_AUX_FILE (@var{file}) @acindex{REQUIRE_AUX_FILE} @vrindex ac_aux_dir Declare that @var{file} is an auxiliary script needed by this configure script, and set the shell variable @code{ac_aux_dir} to the directory where it can be found. 
The value of @code{ac_aux_dir} is guaranteed to end with a @samp{/}. Macros that need auxiliary scripts must use this macro to register each script they need. @end defmac @command{configure} checks for all the auxiliary scripts it needs on startup, and exits with an error if any are missing. @command{autoreconf} also detects missing auxiliary scripts. When used with the @option{--install} option, @command{autoreconf} will try to add missing scripts to the directory specified by @code{AC_CONFIG_AUX_DIR}, or to the top level of the source tree if @code{AC_CONFIG_AUX_DIR} was not used. It can always do this for the scripts needed by Autoconf core macros: @file{install-sh}, @file{config.sub}, and @file{config.guess}. Many other commonly-needed scripts are installed by the third-party tools that @command{autoreconf} knows how to run, such as @file{missing} for Automake and @file{ltmain.sh} for Libtool. If you are using Automake, auxiliary scripts will automatically be included in the tarball created by @command{make dist}. If you are not using Automake you will need to arrange for auxiliary scripts to be included in tarballs yourself. Auxiliary scripts should normally @emph{not} be checked into a version control system, for the same reasons that @command{configure} shouldn't be. The scripts needed by Autoconf core macros can be found in @file{$(datadir)/autoconf/build-aux} of the Autoconf installation (@pxref{Installation Directory Variables}). @file{install-sh} can be downloaded from @url{https://git.savannah.gnu.org/cgit/automake.git/plain/lib/install-sh}. @file{config.sub} and @file{config.guess} can be downloaded from @url{https://git.savannah.gnu.org/cgit/config.git/tree/}. @node Output @section Outputting Files @cindex Outputting files Every Autoconf script, e.g., @file{configure.ac}, should finish by calling @code{AC_OUTPUT}. 
That is the macro that generates and runs @file{config.status}, which in turn creates the makefiles and any other files resulting from configuration. This is the only required macro besides @code{AC_INIT} (@pxref{Input}). @anchor{AC_OUTPUT} @defmac AC_OUTPUT @acindex{OUTPUT} @cindex Instantiation Generate @file{config.status} and launch it. Call this macro once, at the end of @file{configure.ac}. @file{config.status} performs all the configuration actions: all the output files (see @ref{Configuration Files}, macro @code{AC_CONFIG_FILES}), header files (see @ref{Configuration Headers}, macro @code{AC_CONFIG_HEADERS}), commands (see @ref{Configuration Commands}, macro @code{AC_CONFIG_COMMANDS}), links (see @ref{Configuration Links}, macro @code{AC_CONFIG_LINKS}), subdirectories to configure (see @ref{Subdirectories}, macro @code{AC_CONFIG_SUBDIRS}) are honored. The location of your @code{AC_OUTPUT} invocation is the exact point where configuration actions are taken: any code afterwards is executed by @command{configure} once @command{config.status} was run. If you want to bind actions to @command{config.status} itself (independently of whether @command{configure} is being run), see @ref{Configuration Commands, , Running Arbitrary Configuration Commands}. @end defmac Historically, the usage of @code{AC_OUTPUT} was somewhat different. @xref{Obsolete Macros}, for a description of the arguments that @code{AC_OUTPUT} used to support. If you run @command{make} in subdirectories, you should run it using the @command{make} variable @code{MAKE}. Most versions of @command{make} set @code{MAKE} to the name of the @command{make} program plus any options it was given. (But many do not include in it the values of any variables set on the command line, so those are not passed on automatically.) Some old versions of @command{make} do not set this variable. The following macro allows you to use it even with those versions. 
@anchor{AC_PROG_MAKE_SET} @defmac AC_PROG_MAKE_SET @acindex{PROG_MAKE_SET} @ovindex SET_MAKE If the Make command, @code{$MAKE} if set or else @samp{make}, predefines @code{$(MAKE)}, define output variable @code{SET_MAKE} to be empty. Otherwise, define @code{SET_MAKE} to a macro definition that sets @code{$(MAKE)}, such as @samp{MAKE=make}. Calls @code{AC_SUBST} for @code{SET_MAKE}. @end defmac If you use this macro, place a line like this in each @file{Makefile.in} that runs @command{MAKE} on other directories: @example @@SET_MAKE@@ @end example @node Configuration Actions @section Performing Configuration Actions @cindex Configuration actions @file{configure} is designed so that it appears to do everything itself, but there is actually a hidden slave: @file{config.status}. @file{configure} is in charge of examining your system, but it is @file{config.status} that actually takes the proper actions based on the results of @file{configure}. The most typical task of @file{config.status} is to @emph{instantiate} files. @acindex{CONFIG_@var{ITEMS}} This section describes the common behavior of the four standard instantiating macros: @code{AC_CONFIG_FILES}, @code{AC_CONFIG_HEADERS}, @code{AC_CONFIG_COMMANDS} and @code{AC_CONFIG_LINKS}. They all have this prototype: @c FIXME: Can't use @ovar here, Texinfo 4.0 goes lunatic and emits something @c awful. @example AC_CONFIG_@var{ITEMS}(@var{tag}@dots{}, @r{[}@var{commands}@r{]}, @r{[}@var{init-cmds}@r{]}) @end example @noindent where the arguments are: @table @var @item tag@dots{} A blank-or-newline-separated list of tags, which are typically the names of the files to instantiate. You are encouraged to use literals as @var{tags}. 
In particular, you should avoid @example @dots{} && my_foos="$my_foos fooo" @dots{} && my_foos="$my_foos foooo" AC_CONFIG_@var{ITEMS}([$my_foos]) @end example @noindent and use this instead: @example @dots{} && AC_CONFIG_@var{ITEMS}([fooo]) @dots{} && AC_CONFIG_@var{ITEMS}([foooo]) @end example The macros @code{AC_CONFIG_FILES} and @code{AC_CONFIG_HEADERS} use special @var{tag} values: they may have the form @samp{@var{output}} or @samp{@var{output}:@var{inputs}}. The file @var{output} is instantiated from its templates, @var{inputs} (defaulting to @samp{@var{output}.in}). @samp{AC_CONFIG_FILES([Makefile:boiler/top.mk:boiler/bot.mk])}, for example, asks for the creation of the file @file{Makefile} that contains the expansion of the output variables in the concatenation of @file{boiler/top.mk} and @file{boiler/bot.mk}. The special value @samp{-} might be used to denote the standard output when used in @var{output}, or the standard input when used in the @var{inputs}. You most probably don't need to use this in @file{configure.ac}, but it is convenient when using the command line interface of @file{./config.status}, see @ref{config.status Invocation}, for more details. The @var{inputs} may be absolute or relative file names. In the latter case they are first looked for in the build tree, and then in the source tree. Input files should be text files, and a line length below 2000 bytes should be safe. @item commands Shell commands output literally into @file{config.status}, and associated with a tag that the user can use to tell @file{config.status} which commands to run. The commands are run each time a @var{tag} request is given to @file{config.status}, typically each time the file @file{@var{tag}} is created. The variables set during the execution of @command{configure} are @emph{not} available here: you first need to set them via the @var{init-cmds}. 
Nonetheless the following variables are pre-computed: @table @code @item srcdir @vrindex srcdir The name of the top source directory, assuming that the working directory is the top build directory. This is what @command{configure}'s @option{--srcdir} option sets. @item ac_top_srcdir @vrindex ac_top_srcdir The name of the top source directory, assuming that the working directory is the current build directory. @item ac_top_build_prefix @vrindex ac_top_build_prefix The name of the top build directory, assuming that the working directory is the current build directory. It can be empty, or else ends with a slash, so that you may concatenate it. @item ac_srcdir @vrindex ac_srcdir The name of the corresponding source directory, assuming that the working directory is the current build directory. @item tmp @vrindex tmp The name of a temporary directory within the build tree, which you can use if you need to create additional temporary files. The directory is cleaned up when @command{config.status} is done or interrupted. Please use package-specific file name prefixes to avoid clashing with files that @command{config.status} may use internally. @end table @noindent The @dfn{current} directory refers to the directory (or pseudo-directory) containing the input part of @var{tags}. For instance, running @example AC_CONFIG_COMMANDS([deep/dir/out:in/in.in], [@dots{}], [@dots{}]) @end example @noindent with @option{--srcdir=../package} produces the following values: @example # Argument of --srcdir srcdir='../package' # Reversing deep/dir ac_top_build_prefix='../../' # Concatenation of $ac_top_build_prefix and srcdir ac_top_srcdir='../../../package' # Concatenation of $ac_top_srcdir and deep/dir ac_srcdir='../../../package/deep/dir' @end example @noindent independently of @samp{in/in.in}. @item init-cmds Shell commands output @emph{unquoted} near the beginning of @file{config.status}, and executed each time @file{config.status} runs (regardless of the tag). 
Because they are unquoted, for example, @samp{$var} is output as the value of @code{var}. @var{init-cmds} is typically used by @file{configure} to give @file{config.status} some variables it needs to run the @var{commands}. You should be extremely cautious in your variable names: all the @var{init-cmds} share the same name space and may overwrite each other in unpredictable ways. Sorry@enddots{} @end table All these macros can be called multiple times, with different @var{tag} values, of course! @node Configuration Files @section Creating Configuration Files @cindex Creating configuration files @cindex Configuration file creation Be sure to read the previous section, @ref{Configuration Actions}. @anchor{AC_CONFIG_FILES} @defmac AC_CONFIG_FILES (@var{file}@dots{}, @ovar{cmds}, @ovar{init-cmds}) @acindex{CONFIG_FILES} Make @code{AC_OUTPUT} create each @file{@var{file}} by copying an input file (by default @file{@var{file}.in}), substituting the output variable values. @c Before we used to have this feature, which was later rejected @c because it complicates the writing of makefiles: @c If the file would be unchanged, it is left untouched, to preserve @c timestamp. This macro is one of the instantiating macros; see @ref{Configuration Actions}. @xref{Makefile Substitutions}, for more information on using output variables. @xref{Setting Output Variables}, for more information on creating them. This macro creates the directory that the file is in if it doesn't exist. Usually, makefiles are created this way, but other files, such as @file{.gdbinit}, can be specified as well. Typical calls to @code{AC_CONFIG_FILES} look like this: @example AC_CONFIG_FILES([Makefile src/Makefile man/Makefile X/Imakefile]) AC_CONFIG_FILES([autoconf], [chmod +x autoconf]) @end example You can override an input file name by appending to @var{file} a colon-separated list of input files. 
Examples: @example AC_CONFIG_FILES([Makefile:boiler/top.mk:boiler/bot.mk] [lib/Makefile:boiler/lib.mk]) @end example @noindent Doing this allows you to keep your file names acceptable to DOS variants, or to prepend and/or append boilerplate to the file. The @var{file} names should not contain shell metacharacters. @xref{Special Chars in Variables}. @end defmac @node Makefile Substitutions @section Substitutions in Makefiles @cindex Substitutions in makefiles @cindex Makefile substitutions Each subdirectory in a distribution that contains something to be compiled or installed should come with a file @file{Makefile.in}, from which @command{configure} creates a file @file{Makefile} in that directory. To create @file{Makefile}, @command{configure} performs a simple variable substitution, replacing occurrences of @samp{@@@var{variable}@@} in @file{Makefile.in} with the value that @command{configure} has determined for that variable. Variables that are substituted into output files in this way are called @dfn{output variables}. They are ordinary shell variables that are set in @command{configure}. To make @command{configure} substitute a particular variable into the output files, the macro @code{AC_SUBST} must be called with that variable name as an argument. Any occurrences of @samp{@@@var{variable}@@} for other variables are left unchanged. @xref{Setting Output Variables}, for more information on creating output variables with @code{AC_SUBST}. A software package that uses a @command{configure} script should be distributed with a file @file{Makefile.in}, but no makefile; that way, the user has to properly configure the package for the local system before compiling it. @xref{Makefile Conventions, , Makefile Conventions, standards, The GNU Coding Standards}, for more information on what to put in makefiles. 
@menu * Preset Output Variables:: Output variables that are always set * Installation Directory Variables:: Other preset output variables * Changed Directory Variables:: Warnings about @file{datarootdir} * Build Directories:: Supporting multiple concurrent compiles * Automatic Remaking:: Makefile rules for configuring @end menu @node Preset Output Variables @subsection Preset Output Variables @cindex Output variables Some output variables are preset by the Autoconf macros. Some of the Autoconf macros set additional output variables, which are mentioned in the descriptions for those macros. @xref{Output Variable Index}, for a complete list of output variables. @xref{Installation Directory Variables}, for the list of the preset ones related to installation directories. Below are listed the other preset ones, many of which are precious variables (@pxref{Setting Output Variables}, @code{AC_ARG_VAR}). The preset variables which are available during @file{config.status} (@pxref{Configuration Actions}) may also be used during @command{configure} tests. For example, it is permissible to reference @samp{$srcdir} when constructing a list of directories to pass via the @option{-I} option during a compiler feature check. When used in this manner, coupled with the fact that @command{configure} is always run from the top build directory, it is sufficient to use just @samp{$srcdir} instead of @samp{$top_srcdir}. @c Just say no to ASCII sorting! We're humans, not computers. @c These variables are listed as they would be in a dictionary: @c actor @c Actress @c actress @defvar CFLAGS @evindex CFLAGS @ovindex CFLAGS Debugging and optimization options for the C compiler. If it is not set in the environment when @command{configure} runs, the default value is set when you call @code{AC_PROG_CC} (or empty if you don't). @command{configure} uses this variable when compiling or linking programs to test for C features. 
If a compiler option affects only the behavior of the preprocessor (e.g., @option{-D@var{name}}), it should be put into @code{CPPFLAGS} instead. If it affects only the linker (e.g., @option{-L@var{directory}}), it should be put into @code{LDFLAGS} instead. If it affects only the compiler proper, @code{CFLAGS} is the natural home for it. If an option affects multiple phases of the compiler, though, matters get tricky: @itemize @bullet @item If an option selects a 32-bit or 64-bit build on a bi-arch system, it must be put directly into @code{CC}, e.g., @code{CC='gcc -m64'}. This is necessary for @code{config.guess} to work right. @item Otherwise one approach is to put the option into @code{CC}. Another is to put it into both @code{CPPFLAGS} and @code{LDFLAGS}, but not into @code{CFLAGS}. @end itemize However, remember that some @file{Makefile} variables are reserved by the GNU Coding Standards for the use of the ``user''---the person building the package. For instance, @code{CFLAGS} is one such variable. Sometimes package developers are tempted to set user variables such as @code{CFLAGS} because it appears to make their job easier. However, the package itself should never set a user variable, particularly not to include switches that are required for proper compilation of the package. Since these variables are documented as being for the package builder, that person rightfully expects to be able to override any of these variables at build time. If the package developer needs to add switches without interfering with the user, the proper way to do that is to introduce an additional variable. Automake makes this easy by introducing @code{AM_CFLAGS} (@pxref{Flag Variables Ordering, , , automake, GNU Automake}), but the concept is the same even if Automake is not used. @end defvar @defvar configure_input @ovindex configure_input A comment saying that the file was generated automatically by @command{configure} and giving the name of the input file. 
@code{AC_OUTPUT} adds a comment line containing this variable to the top of every makefile it creates. For other files, you should reference this variable in a comment at the top of each input file. For example, an input shell script should begin like this: @example #!/bin/sh # @@configure_input@@ @end example @noindent The presence of that line also reminds people editing the file that it needs to be processed by @command{configure} in order to be used. @end defvar @defvar CPPFLAGS @evindex CPPFLAGS @ovindex CPPFLAGS Preprocessor options for the C, C++, Objective C, and Objective C++ preprocessors and compilers. If it is not set in the environment when @command{configure} runs, the default value is empty. @command{configure} uses this variable when preprocessing or compiling programs to test for C, C++, Objective C, and Objective C++ features. This variable's contents should contain options like @option{-I}, @option{-D}, and @option{-U} that affect only the behavior of the preprocessor. Please see the explanation of @code{CFLAGS} for what you can do if an option affects other phases of the compiler as well. Currently, @command{configure} always links as part of a single invocation of the compiler that also preprocesses and compiles, so it uses this variable also when linking programs. However, it is unwise to depend on this behavior because the GNU Coding Standards do not require it and many packages do not use @code{CPPFLAGS} when linking programs. @xref{Special Chars in Variables}, for limitations that @code{CPPFLAGS} might run into. @end defvar @defvar CXXFLAGS @evindex CXXFLAGS @ovindex CXXFLAGS Debugging and optimization options for the C++ compiler. It acts like @code{CFLAGS}, but for C++ instead of C. @end defvar @defvar DEFS @ovindex DEFS @option{-D} options to pass to the C compiler. If @code{AC_CONFIG_HEADERS} is called, @command{configure} replaces @samp{@@DEFS@@} with @option{-DHAVE_CONFIG_H} instead (@pxref{Configuration Headers}). 
This variable is not defined while @command{configure} is performing its tests, only when creating the output files. @xref{Setting Output Variables}, for how to check the results of previous tests. @end defvar @defvar ECHO_C @defvarx ECHO_N @defvarx ECHO_T @ovindex ECHO_C @ovindex ECHO_N @ovindex ECHO_T How does one suppress the trailing newline from @command{echo} for question-answer message pairs? These variables provide a way: @example echo $ECHO_N "And the winner is... $ECHO_C" sleep 100000000000 echo "$@{ECHO_T@}dead." @end example @noindent Some old and uncommon @command{echo} implementations offer no means to achieve this, in which case @code{ECHO_T} is set to tab. You might not want to use it. @end defvar @defvar ERLCFLAGS @evindex ERLCFLAGS @ovindex ERLCFLAGS Debugging and optimization options for the Erlang compiler. If it is not set in the environment when @command{configure} runs, the default value is empty. @command{configure} uses this variable when compiling programs to test for Erlang features. @end defvar @defvar FCFLAGS @evindex FCFLAGS @ovindex FCFLAGS Debugging and optimization options for the Fortran compiler. If it is not set in the environment when @command{configure} runs, the default value is set when you call @code{AC_PROG_FC} (or empty if you don't). @command{configure} uses this variable when compiling or linking programs to test for Fortran features. @end defvar @defvar FFLAGS @evindex FFLAGS @ovindex FFLAGS Debugging and optimization options for the Fortran 77 compiler. If it is not set in the environment when @command{configure} runs, the default value is set when you call @code{AC_PROG_F77} (or empty if you don't). @command{configure} uses this variable when compiling or linking programs to test for Fortran 77 features. @end defvar @defvar LDFLAGS @evindex LDFLAGS @ovindex LDFLAGS Options for the linker. If it is not set in the environment when @command{configure} runs, the default value is empty. 
@command{configure} uses this variable when linking programs to test for C, C++, Objective C, Objective C++, Fortran, and Go features. This variable's contents should contain options like @option{-s} and @option{-L} that affect only the behavior of the linker. Please see the explanation of @code{CFLAGS} for what you can do if an option also affects other phases of the compiler. Don't use this variable to pass library names (@option{-l}) to the linker; use @code{LIBS} instead. @end defvar @defvar LIBS @evindex LIBS @ovindex LIBS @option{-l} options to pass to the linker. The default value is empty, but some Autoconf macros may prepend extra libraries to this variable if those libraries are found and provide necessary functions, see @ref{Libraries}. @command{configure} uses this variable when linking programs to test for C, C++, Objective C, Objective C++, Fortran, and Go features. @end defvar @defvar OBJCFLAGS @evindex OBJCFLAGS @ovindex OBJCFLAGS Debugging and optimization options for the Objective C compiler. It acts like @code{CFLAGS}, but for Objective C instead of C. @end defvar @defvar OBJCXXFLAGS @evindex OBJCXXFLAGS @ovindex OBJCXXFLAGS Debugging and optimization options for the Objective C++ compiler. It acts like @code{CXXFLAGS}, but for Objective C++ instead of C++. @end defvar @defvar GOFLAGS @evindex GOFLAGS @ovindex GOFLAGS Debugging and optimization options for the Go compiler. It acts like @code{CFLAGS}, but for Go instead of C. @end defvar @defvar builddir @ovindex builddir Rigorously equal to @samp{.}. Added for symmetry only. @end defvar @defvar abs_builddir @ovindex abs_builddir Absolute name of @code{builddir}. @end defvar @defvar top_builddir @ovindex top_builddir The relative name of the top level of the current build tree. In the top-level directory, this is the same as @code{builddir}. @end defvar @defvar top_build_prefix @ovindex top_build_prefix The relative name of the top level of the current build tree with final slash if nonempty. 
This is the same as @code{top_builddir}, except that it contains zero or more runs of @code{../}, so it should not be appended with a slash for concatenation. This helps for @command{make} implementations that otherwise do not treat @file{./file} and @file{file} as equal in the top-level build directory. @end defvar @defvar abs_top_builddir @ovindex abs_top_builddir Absolute name of @code{top_builddir}. @end defvar @defvar srcdir @ovindex srcdir The name of the directory that contains the source code for that makefile. @end defvar @defvar abs_srcdir @ovindex abs_srcdir Absolute name of @code{srcdir}. @end defvar @defvar top_srcdir @ovindex top_srcdir The name of the top-level source code directory for the package. In the top-level directory, this is the same as @code{srcdir}. @end defvar @defvar abs_top_srcdir @ovindex abs_top_srcdir Absolute name of @code{top_srcdir}. @end defvar @node Installation Directory Variables @subsection Installation Directory Variables @cindex Installation directories @cindex Directories, installation The following variables specify the directories for package installation, see @ref{Directory Variables, , Variables for Installation Directories, standards, The GNU Coding Standards}, for more information. Each variable corresponds to an argument of @command{configure}; trailing slashes are stripped so that expressions such as @samp{$@{prefix@}/lib} expand with only one slash between directory names. See the end of this section for details on when and how to use these variables. @defvar bindir @ovindex bindir The directory for installing executables that users run. @end defvar @defvar datadir @ovindex datadir The directory for installing idiosyncratic read-only architecture-independent data. @end defvar @defvar datarootdir @ovindex datarootdir The root of the directory tree for read-only architecture-independent data files. @end defvar @defvar docdir @ovindex docdir The directory for installing documentation files (other than Info and man). 
@end defvar @defvar dvidir @ovindex dvidir The directory for installing documentation files in DVI format. @end defvar @defvar exec_prefix @ovindex exec_prefix The installation prefix for architecture-dependent files. By default it's the same as @code{prefix}. You should avoid installing anything directly to @code{exec_prefix}. However, the default value for directories containing architecture-dependent files should be relative to @code{exec_prefix}. @end defvar @defvar htmldir @ovindex htmldir The directory for installing HTML documentation. @end defvar @defvar includedir @ovindex includedir The directory for installing C header files. @end defvar @defvar infodir @ovindex infodir The directory for installing documentation in Info format. @end defvar @defvar libdir @ovindex libdir The directory for installing object code libraries. @end defvar @defvar libexecdir @ovindex libexecdir The directory for installing executables that other programs run. @end defvar @defvar localedir @ovindex localedir The directory for installing locale-dependent but architecture-independent data, such as message catalogs. This directory usually has a subdirectory per locale. @end defvar @defvar localstatedir @ovindex localstatedir The directory for installing modifiable single-machine data. Content in this directory typically survives a reboot. @end defvar @defvar runstatedir @ovindex runstatedir The directory for installing temporary modifiable single-machine data. Content in this directory survives as long as the process is running (such as pid files), as contrasted with @file{/tmp} that may be periodically cleaned. Conversely, this directory is typically cleaned on a reboot. By default, this is a subdirectory of @code{localstatedir}. @end defvar @defvar mandir @ovindex mandir The top-level directory for installing documentation in man format. @end defvar @defvar oldincludedir @ovindex oldincludedir The directory for installing C header files for non-GCC compilers. 
@end defvar @defvar pdfdir @ovindex pdfdir The directory for installing PDF documentation. @end defvar @defvar prefix @ovindex prefix The common installation prefix for all files. If @code{exec_prefix} is defined to a different value, @code{prefix} is used only for architecture-independent files. @end defvar @defvar psdir @ovindex psdir The directory for installing PostScript documentation. @end defvar @defvar sbindir @ovindex sbindir The directory for installing executables that system administrators run. @end defvar @defvar sharedstatedir @ovindex sharedstatedir The directory for installing modifiable architecture-independent data. @end defvar @defvar sysconfdir @ovindex sysconfdir The directory for installing read-only single-machine data. @end defvar Most of these variables have values that rely on @code{prefix} or @code{exec_prefix}. It is deliberate that the directory output variables keep them unexpanded: typically @samp{@@datarootdir@@} is replaced by @samp{$@{prefix@}/share}, not @samp{/usr/local/share}, and @samp{@@datadir@@} is replaced by @samp{$@{datarootdir@}}. This behavior is mandated by the GNU Coding Standards, so that when the user runs: @table @samp @item make she can still specify a different prefix from the one specified to @command{configure}, in which case, if needed, the package should hard code dependencies corresponding to the make-specified prefix. @item make install she can specify a different installation location, in which case the package @emph{must} still depend on the location which was compiled in (i.e., never recompile when @samp{make install} is run). This is an extremely important feature, as many people may decide to install all the files of a package grouped together, and then install links from the final locations to there. 
@end table In order to support these features, it is essential that @code{datarootdir} remains defined as @samp{$@{prefix@}/share}, so that its value can be expanded based on the current value of @code{prefix}. A corollary is that you should not use these variables except in makefiles. For instance, instead of trying to evaluate @code{datadir} in @file{configure} and hard-coding it in makefiles using e.g., @samp{AC_DEFINE_UNQUOTED([DATADIR], ["$datadir"], [Data directory.])}, you should add @option{-DDATADIR='$(datadir)'} to your makefile's definition of @code{CPPFLAGS} (@code{AM_CPPFLAGS} if you are also using Automake). Similarly, you should not rely on @code{AC_CONFIG_FILES} to replace @code{bindir} and friends in your shell scripts and other files; instead, let @command{make} manage their replacement. For instance Autoconf ships templates of its shell scripts ending with @samp{.in}, and uses a makefile snippet similar to the following to build scripts like @command{autoheader} and @command{autom4te}: @example @group edit = sed \ -e 's|@@bindir[@@]|$(bindir)|g' \ -e 's|@@pkgdatadir[@@]|$(pkgdatadir)|g' \ -e 's|@@prefix[@@]|$(prefix)|g' @end group @group autoheader autom4te: Makefile rm -f $@@ $@@.tmp srcdir=''; \ test -f ./$@@.in || srcdir=$(srcdir)/; \ $(edit) $$@{srcdir@}$@@.in >$@@.tmp @c $$ restore font-lock chmod +x $@@.tmp chmod a-w $@@.tmp mv $@@.tmp $@@ @end group @group autoheader: $(srcdir)/autoheader.in autom4te: $(srcdir)/autom4te.in @end group @end example Some details are noteworthy: @table @asis @item @samp{@@bindir[@@]} The brackets prevent @command{configure} from replacing @samp{@@bindir@@} in the Sed expression itself. Brackets are preferable to a backslash here, since Posix says @samp{\@@} is not portable. @item @samp{$(bindir)} Don't use @samp{@@bindir@@}! Use the matching makefile variable instead. 
@item @samp{$(pkgdatadir)} The example takes advantage of the variable @samp{$(pkgdatadir)} provided by Automake; it is equivalent to @samp{$(datadir)/$(PACKAGE)}. @item @samp{/} Don't use @samp{/} in the Sed expressions that replace file names since most likely the variables you use, such as @samp{$(bindir)}, contain @samp{/}. Use a shell metacharacter instead, such as @samp{|}. @item special characters File names, file name components, and the value of @code{VPATH} should not contain shell metacharacters or white space. @xref{Special Chars in Variables}. @item dependency on @file{Makefile} Since @code{edit} uses values that depend on the configuration specific values (@code{prefix}, etc.)@: and not only on @code{VERSION} and so forth, the output depends on @file{Makefile}, not @file{configure.ac}. @item @samp{$@@} The main rule is generic, and uses @samp{$@@} extensively to avoid the need for multiple copies of the rule. @item Separated dependencies and single suffix rules You can't use them! The above snippet cannot be (portably) rewritten as: @example autoconf autoheader: Makefile @group .in: rm -f $@@ $@@.tmp $(edit) $< >$@@.tmp chmod +x $@@.tmp mv $@@.tmp $@@ @end group @end example @xref{Single Suffix Rules}, for details. @item @samp{$(srcdir)} Be sure to specify the name of the source directory, otherwise the package won't support separated builds. @end table For the more specific installation of Erlang libraries, the following variables are defined: @defvar ERLANG_INSTALL_LIB_DIR @ovindex ERLANG_INSTALL_LIB_DIR @acindex{ERLANG_SUBST_INSTALL_LIB_DIR} The common parent directory of Erlang library installation directories. This variable is set by calling the @code{AC_ERLANG_SUBST_INSTALL_LIB_DIR} macro in @file{configure.ac}. @end defvar @defvar ERLANG_INSTALL_LIB_DIR_@var{library} @ovindex ERLANG_INSTALL_LIB_DIR_@var{library} @acindex{ERLANG_SUBST_INSTALL_LIB_SUBDIR} The installation directory for Erlang library @var{library}. 
This variable is set by using the @samp{AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR} macro in @file{configure.ac}. @end defvar @xref{Erlang Libraries}, for details. @node Changed Directory Variables @subsection Changed Directory Variables @cindex @file{datarootdir} In Autoconf 2.60, the set of directory variables has changed, and the defaults of some variables have been adjusted (@pxref{Installation Directory Variables}) to changes in the GNU Coding Standards. Notably, @file{datadir}, @file{infodir}, and @file{mandir} are now expressed in terms of @file{datarootdir}. If you are upgrading from an earlier Autoconf version, you may need to adjust your files to ensure that the directory variables are substituted correctly (@pxref{Defining Directories}), and that a definition of @file{datarootdir} is in place. For example, in a @file{Makefile.in}, adding @example datarootdir = @@datarootdir@@ @end example @noindent is usually sufficient. If you use Automake to create @file{Makefile.in}, it will add this for you. To help with the transition, Autoconf warns about files that seem to use @code{datarootdir} without defining it. In some cases, it then expands the value of @code{$datarootdir} in substitutions of the directory variables. 
The following example shows such a warning: @example $ @kbd{cat configure.ac} AC_INIT AC_CONFIG_FILES([Makefile]) AC_OUTPUT $ @kbd{cat Makefile.in} prefix = @@prefix@@ datadir = @@datadir@@ $ @kbd{autoconf} $ @kbd{configure} configure: creating ./config.status config.status: creating Makefile config.status: WARNING: Makefile.in seems to ignore the --datarootdir setting $ @kbd{cat Makefile} prefix = /usr/local datadir = $@{prefix@}/share @end example Usually one can easily change the file to accommodate both older and newer Autoconf releases: @example $ @kbd{cat Makefile.in} prefix = @@prefix@@ datarootdir = @@datarootdir@@ datadir = @@datadir@@ $ @kbd{configure} configure: creating ./config.status config.status: creating Makefile $ @kbd{cat Makefile} prefix = /usr/local datarootdir = $@{prefix@}/share datadir = $@{datarootdir@} @end example @acindex{DATAROOTDIR_CHECKED} In some cases, however, the checks may not be able to detect that a suitable initialization of @code{datarootdir} is in place, or they may fail to detect that such an initialization is necessary in the output file. If, after auditing your package, there are still spurious @file{configure} warnings about @code{datarootdir}, you may add the line @example AC_DEFUN([AC_DATAROOTDIR_CHECKED]) @end example @noindent to your @file{configure.ac} to disable the warnings. This is an exception to the usual rule that you should not define a macro whose name begins with @code{AC_} (@pxref{Macro Names}). @node Build Directories @subsection Build Directories @cindex Build directories @cindex Directories, build You can support compiling a software package for several architectures simultaneously from the same copy of the source code. The object files for each architecture are kept in their own directory. To support doing this, @command{make} uses the @code{VPATH} variable to find the files that are in the source directory. GNU Make can do this. 
Most other recent @command{make} programs can do this as well, though they may have difficulties and it is often simpler to recommend GNU @command{make} (@pxref{VPATH and Make}). Older @command{make} programs do not support @code{VPATH}; when using them, the source code must be in the same directory as the object files. If you are using GNU Automake, the remaining details in this section are already covered for you, based on the contents of your @file{Makefile.am}. But if you are using Autoconf in isolation, then supporting @code{VPATH} requires the following in your @file{Makefile.in}: @example srcdir = @@srcdir@@ VPATH = @@srcdir@@ @end example Do not set @code{VPATH} to the value of another variable (@pxref{Variables listed in VPATH}). @command{configure} substitutes the correct value for @code{srcdir} when it produces @file{Makefile}. Do not use the @command{make} variable @code{$<}, which expands to the file name of the file in the source directory (found with @code{VPATH}), except in implicit rules. (An implicit rule is one such as @samp{.c.o}, which tells how to create a @file{.o} file from a @file{.c} file.) Some versions of @command{make} do not set @code{$<} in explicit rules; they expand it to an empty value. Instead, Make command lines should always refer to source files by prefixing them with @samp{$(srcdir)/}. It's safer to quote the source directory name, in case it contains characters that are special to the shell. Because @samp{$(srcdir)} is expanded by Make, single-quoting works and is safer than double-quoting. For example: @example time.info: time.texinfo $(MAKEINFO) '$(srcdir)/time.texinfo' @end example @node Automatic Remaking @subsection Automatic Remaking @cindex Automatic remaking @cindex Remaking automatically You can put rules like the following in the top-level @file{Makefile.in} for a package to automatically update the configuration information when you change the configuration files. 
This example includes all of the optional files, such as @file{aclocal.m4} and those related to configuration header files. Omit from the @file{Makefile.in} rules for any of these files that your package does not use. The @samp{$(srcdir)/} prefix is included because of limitations in the @code{VPATH} mechanism. The @file{stamp-} files are necessary because the timestamps of @file{config.h.in} and @file{config.h} are not changed if remaking them does not change their contents. This feature avoids unnecessary recompilation. You should include the file @file{stamp-h.in} in your package's distribution, so that @command{make} considers @file{config.h.in} up to date. Don't use @command{touch} (@pxref{touch, , Limitations of Usual Tools}); instead, use @command{echo} (using @command{date} would cause needless differences, hence CVS conflicts, etc.). @example @group $(srcdir)/configure: configure.ac aclocal.m4 cd '$(srcdir)' && autoconf # autoheader might not change config.h.in, so touch a stamp file. $(srcdir)/config.h.in: stamp-h.in ; $(srcdir)/stamp-h.in: configure.ac aclocal.m4 cd '$(srcdir)' && autoheader echo timestamp > '$(srcdir)/stamp-h.in' config.h: stamp-h ; stamp-h: config.h.in config.status ./config.status Makefile: Makefile.in config.status ./config.status config.status: configure ./config.status --recheck @end group @end example @noindent (Be careful if you copy these lines directly into your makefile, as you need to convert the indented lines to start with the tab character.) In addition, you should use @example AC_CONFIG_FILES([stamp-h], [echo timestamp > stamp-h]) @end example @noindent so @file{config.status} ensures that @file{config.h} is considered up to date. @xref{Output}, for more information about @code{AC_OUTPUT}. @xref{config.status Invocation}, for more examples of handling configuration-related dependencies. 
@node Configuration Headers @section Configuration Header Files @cindex Configuration Header @cindex @file{config.h} When a package contains more than a few tests that define C preprocessor symbols, the command lines to pass @option{-D} options to the compiler can get quite long. This causes two problems. One is that the @command{make} output is hard to visually scan for errors. More seriously, the command lines can exceed the length limits of some operating systems. As an alternative to passing @option{-D} options to the compiler, @command{configure} scripts can create a C header file containing @samp{#define} directives. The @code{AC_CONFIG_HEADERS} macro selects this kind of output. Though it can be called anywhere between @code{AC_INIT} and @code{AC_OUTPUT}, it is customary to call it right after @code{AC_INIT}. The package should @samp{#include} the configuration header file before any other header files, to prevent inconsistencies in declarations (for example, if it redefines @code{const}, or if it defines a macro like @code{_FILE_OFFSET_BITS} that affects the behavior of system headers). Note that it is okay to only include @file{config.h} from @file{.c} files; the project's @file{.h} files can rely on @file{config.h} already being included first by the corresponding @file{.c} file. To provide for VPATH builds, remember to pass the C compiler a @option{-I.} option (or @option{-I..}; whichever directory contains @file{config.h}). Even if you use @samp{#include "config.h"}, the preprocessor searches only the directory of the currently read file, i.e., the source directory, not the build directory. With the appropriate @option{-I} option, you can use @samp{#include <config.h>}. Actually, it's a good habit to use it, because in the rare case when the source directory contains another @file{config.h}, the build directory should be searched first. 
@defmac AC_CONFIG_HEADERS (@var{header} @dots{}, @ovar{cmds}, @ovar{init-cmds}) @acindex{CONFIG_HEADERS} @cvindex HAVE_CONFIG_H This macro is one of the instantiating macros; see @ref{Configuration Actions}. Make @code{AC_OUTPUT} create the file(s) in the blank-or-newline-separated list @var{header} containing C preprocessor @code{#define} statements, and replace @samp{@@DEFS@@} in generated files with @option{-DHAVE_CONFIG_H} instead of the value of @code{DEFS}. The usual name for @var{header} is @file{config.h}; @var{header} should not contain shell metacharacters. @xref{Special Chars in Variables}. If @var{header} already exists and its contents are identical to what @code{AC_OUTPUT} would put in it, it is left alone. Doing this allows making some changes in the configuration without needlessly causing object files that depend on the header file to be recompiled. Usually the input file is named @file{@var{header}.in}; however, you can override the input file name by appending to @var{header} a colon-separated list of input files. For example, you might need to make the input file name acceptable to DOS variants: @example AC_CONFIG_HEADERS([config.h:config.hin]) @end example @end defmac @defmac AH_HEADER @ahindex{HEADER} This macro is defined as the name of the first declared config header and undefined if no config headers have been declared up to this point. A third-party macro may, for example, require use of a config header without invoking AC_CONFIG_HEADERS twice, like this: @example AC_CONFIG_COMMANDS_PRE( [m4_ifndef([AH_HEADER], [AC_CONFIG_HEADERS([config.h])])]) @end example @end defmac @xref{Configuration Actions}, for more details on @var{header}. 
@menu * Header Templates:: Input for the configuration headers * autoheader Invocation:: How to create configuration templates * Autoheader Macros:: How to specify CPP templates @end menu @node Header Templates @subsection Configuration Header Templates @cindex Configuration Header Template @cindex Header templates @cindex @file{config.h.in} Your distribution should contain a template file that looks as you want the final header file to look, including comments, with @code{#undef} statements which are used as hooks. For example, suppose your @file{configure.ac} makes these calls: @example AC_CONFIG_HEADERS([conf.h]) AC_CHECK_HEADERS([unistd.h]) @end example @noindent Then you could have code like the following in @file{conf.h.in}. The @file{conf.h} created by @command{configure} defines @samp{HAVE_UNISTD_H} to 1, if and only if the system has @file{unistd.h}. @example @group /* Define as 1 if you have unistd.h. */ #undef HAVE_UNISTD_H @end group @end example The format of the template file is stricter than what the C preprocessor is required to accept. A directive line should contain only whitespace, @samp{#undef}, and @samp{HAVE_UNISTD_H}. The use of @samp{#define} instead of @samp{#undef}, or of comments on the same line as @samp{#undef}, is strongly discouraged. Each hook should only be listed once. Other preprocessor lines, such as @samp{#ifdef} or @samp{#include}, are copied verbatim from the template into the generated header. Since it is a tedious task to keep a template header up to date, you may use @command{autoheader} to generate it, see @ref{autoheader Invocation}. During the instantiation of the header, each @samp{#undef} line in the template file for each symbol defined by @samp{AC_DEFINE} is changed to an appropriate @samp{#define}. If the corresponding @samp{AC_DEFINE} has not been executed during the @command{configure} run, the @samp{#undef} line is commented out. 
(This is important, e.g., for @samp{_POSIX_SOURCE}: on many systems, it can be implicitly defined by the compiler, and undefining it in the header would then break compilation of subsequent headers.) Currently, @emph{all} remaining @samp{#undef} lines in the header template are commented out, whether or not there was a corresponding @samp{AC_DEFINE} for the macro name; but this behavior is not guaranteed for future releases of Autoconf. Generally speaking, since you should not use @samp{#define}, and you cannot guarantee whether a @samp{#undef} directive in the header template will be converted to a @samp{#define} or commented out in the generated header file, the template file cannot be used for conditional definition effects. Consequently, if you need to use the construct @example @group #ifdef THIS # define THAT #endif @end group @end example @noindent you must place it outside of the template. If you absolutely need to hook it to the config header itself, please put the directives in a separate file, and @samp{#include} that file from the config header template. If you are using @command{autoheader}, you would probably use @samp{AH_BOTTOM} to append the @samp{#include} directive. @node autoheader Invocation @subsection Using @command{autoheader} to Create @file{config.h.in} @cindex @command{autoheader} The @command{autoheader} program can create a template file of C @samp{#define} statements for @command{configure} to use. It searches for the first invocation of @code{AC_CONFIG_HEADERS} in @file{configure} sources to determine the name of the template. (If the first call of @code{AC_CONFIG_HEADERS} specifies more than one input file name, @command{autoheader} uses the first one.) It is recommended that only one input file be used. If you want to append boilerplate code, it is preferable to use @samp{AH_BOTTOM([#include <conf_post.h>])}. File @file{conf_post.h} is not processed during the configuration then, which makes things clearer. 
Analogously, @code{AH_TOP} can be used to prepend boilerplate code. In order to do its job, @command{autoheader} needs you to document all of the symbols that you might use. Typically this is done via an @code{AC_DEFINE} or @code{AC_DEFINE_UNQUOTED} call whose first argument is a literal symbol and whose third argument describes the symbol (@pxref{Defining Symbols}). Alternatively, you can use @code{AH_TEMPLATE} (@pxref{Autoheader Macros}), or you can supply a suitable input file for a subsequent configuration header file. Symbols defined by Autoconf's builtin tests are already documented properly; you need to document only those that you define yourself. You might wonder why @command{autoheader} is needed: after all, why would @command{configure} need to ``patch'' a @file{config.h.in} to produce a @file{config.h} instead of just creating @file{config.h} from scratch? Well, when everything rocks, the answer is just that we are wasting our time maintaining @command{autoheader}: generating @file{config.h} directly is all that is needed. When things go wrong, however, you'll be thankful for the existence of @command{autoheader}. The fact that the symbols are documented is important in order to @emph{check} that @file{config.h} makes sense. The fact that there is a well-defined list of symbols that should be defined (or not) is also important for people who are porting packages to environments where @command{configure} cannot be run: they just have to @emph{fill in the blanks}. But let's come back to the point: the invocation of @command{autoheader}@dots{} If you give @command{autoheader} an argument, it uses that file instead of @file{configure.ac} and writes the header file to the standard output instead of to @file{config.h.in}. If you give @command{autoheader} an argument of @option{-}, it reads the standard input instead of @file{configure.ac} and writes the header file to the standard output. 
@command{autoheader} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Report processing steps. @item --debug @itemx -d Don't remove the temporary files. @item --force @itemx -f Remake the template file even if newer than its input files. @item --include=@var{dir} @itemx -I @var{dir} Append @var{dir} to the include path. Multiple invocations accumulate. @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend @var{dir} to the include path. Multiple invocations accumulate. @item --warnings=@var{category}[,@var{category}...] @itemx -W@var{category}[,@var{category}...] @evindex WARNINGS Enable or disable warnings related to each @var{category}. @xref{m4_warn}, for a comprehensive list of categories. Special values include: @table @samp @item all Enable all categories of warnings. @item none Disable all categories of warnings. @item error Treat all warnings as errors. @item no-@var{category} Disable warnings falling into @var{category}. @end table The environment variable @env{WARNINGS} may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of @option{--warnings}, but unknown categories are silently ignored. The command line takes precedence; for instance, if @env{WARNINGS} is set to @code{obsolete}, but @option{-Wnone} is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see @ref{m4_warn}. @end table @node Autoheader Macros @subsection Autoheader Macros @cindex Autoheader macros @command{autoheader} scans @file{configure.ac} and figures out which C preprocessor symbols it might define. 
It knows how to generate templates for symbols defined by @code{AC_CHECK_HEADERS}, @code{AC_CHECK_FUNCS} etc., but if you @code{AC_DEFINE} any additional symbol, you must define a template for it. If there are missing templates, @command{autoheader} fails with an error message. The template for a @var{symbol} is created by @command{autoheader} from the @var{description} argument to an @code{AC_DEFINE}; see @ref{Defining Symbols}. For special needs, you can use the following macros. @defmac AH_TEMPLATE (@var{key}, @var{description}) @ahindex{TEMPLATE} Tell @command{autoheader} to generate a template for @var{key}. This macro generates standard templates just like @code{AC_DEFINE} when a @var{description} is given. For example: @example AH_TEMPLATE([NULL_DEVICE], [Name of the file to open to get a null file, or a data sink.]) @end example @noindent generates the following template, with the description properly justified. @example /* Name of the file to open to get a null file, or a data sink. */ #undef NULL_DEVICE @end example @end defmac @defmac AH_VERBATIM (@var{key}, @var{template}) @ahindex{VERBATIM} Tell @command{autoheader} to include the @var{template} as-is in the header template file. This @var{template} is associated with the @var{key}, which is used to sort all the different templates and guarantee their uniqueness. It should be a symbol that can be defined via @code{AC_DEFINE}. @end defmac @defmac AH_TOP (@var{text}) @ahindex{TOP} Include @var{text} at the top of the header template file. @end defmac @defmac AH_BOTTOM (@var{text}) @ahindex{BOTTOM} Include @var{text} at the bottom of the header template file. @end defmac Please note that @var{text} gets included ``verbatim'' to the template file, not to the resulting config header, so it can easily get mangled when the template is processed. 
There is rarely a need for something other than @example AH_BOTTOM([#include <custom.h>]) @end example @node Configuration Commands @section Running Arbitrary Configuration Commands @cindex Configuration commands @cindex Commands for configuration You can execute arbitrary commands before, during, and after @file{config.status} is run. The three following macros accumulate the commands to run when they are called multiple times. @code{AC_CONFIG_COMMANDS} replaces the obsolete macro @code{AC_OUTPUT_COMMANDS}; see @ref{Obsolete Macros}, for details. @anchor{AC_CONFIG_COMMANDS} @defmac AC_CONFIG_COMMANDS (@var{tag}@dots{}, @ovar{cmds}, @ovar{init-cmds}) @acindex{CONFIG_COMMANDS} Specify additional shell commands to run at the end of @file{config.status}, and shell commands to initialize any variables from @command{configure}. Associate the commands with @var{tag}. Since typically the @var{cmds} create a file, @var{tag} should naturally be the name of that file. If needed, the directory hosting @var{tag} is created. The @var{tag} should not contain shell metacharacters. @xref{Special Chars in Variables}. This macro is one of the instantiating macros; see @ref{Configuration Actions}. Here is an unrealistic example: @example fubar=42 AC_CONFIG_COMMANDS([fubar], [echo this is extra $fubar, and so on.], [fubar=$fubar]) @end example Here is a better one: @example AC_CONFIG_COMMANDS([timestamp], [date >timestamp]) @end example @end defmac The following two macros look similar, but in fact they are not of the same breed: they are executed directly by @file{configure}, so you cannot use @file{config.status} to rerun them. @c Yet it is good to leave them here. The user sees them together and @c decides which best fits their needs. @defmac AC_CONFIG_COMMANDS_PRE (@var{cmds}) @acindex{CONFIG_COMMANDS_PRE} Execute the @var{cmds} right before creating @file{config.status}. 
This macro presents the last opportunity to call @code{AC_SUBST}, @code{AC_DEFINE}, or @code{AC_CONFIG_@var{ITEMS}} macros. @end defmac @defmac AC_CONFIG_COMMANDS_POST (@var{cmds}) @acindex{CONFIG_COMMANDS_POST} Execute the @var{cmds} right after creating @file{config.status}. @end defmac @node Configuration Links @section Creating Configuration Links @cindex Configuration links @cindex Links for configuration You may find it convenient to create links whose destinations depend upon results of tests. One can use @code{AC_CONFIG_COMMANDS} but the creation of relative symbolic links can be delicate when the package is built in a directory different from the source directory. @anchor{AC_CONFIG_LINKS} @defmac AC_CONFIG_LINKS (@var{dest}:@var{source}@dots{}, @ovar{cmds}, @ @ovar{init-cmds}) @acindex{CONFIG_LINKS} @cindex Links Make @code{AC_OUTPUT} link each of the existing files @var{source} to the corresponding link name @var{dest}. Makes a symbolic link if possible, otherwise a hard link if possible, otherwise a copy. The @var{dest} and @var{source} names should be relative to the top level source or build directory, and should not contain shell metacharacters. @xref{Special Chars in Variables}. This macro is one of the instantiating macros; see @ref{Configuration Actions}. For example, this call: @example AC_CONFIG_LINKS([host.h:config/$machine.h object.h:config/$obj_format.h]) @end example @noindent creates in the current directory @file{host.h} as a link to @file{@var{srcdir}/config/$machine.h}, and @file{object.h} as a link to @file{@var{srcdir}/config/$obj_format.h}. The tempting value @samp{.} for @var{dest} is invalid: it makes it impossible for @samp{config.status} to guess the links to establish. One can then run: @example ./config.status host.h object.h @end example @noindent to create the links. 
@end defmac @node Subdirectories @section Configuring Other Packages in Subdirectories @cindex Configure subdirectories @cindex Subdirectory configure In most situations, calling @code{AC_OUTPUT} is sufficient to produce makefiles in subdirectories. However, @command{configure} scripts that control more than one independent package can use @code{AC_CONFIG_SUBDIRS} to run @command{configure} scripts for other packages in subdirectories. @defmac AC_CONFIG_SUBDIRS (@var{dir} @dots{}) @acindex{CONFIG_SUBDIRS} @ovindex subdirs Make @code{AC_OUTPUT} run @command{configure} in each subdirectory @var{dir} in the given blank-or-newline-separated list. Each @var{dir} should be a literal, i.e., please do not use: @example @c If you change this example, adjust tests/torture.at:Non-literal AC_CONFIG_SUBDIRS. if test "x$package_foo_enabled" = xyes; then my_subdirs="$my_subdirs foo" fi AC_CONFIG_SUBDIRS([$my_subdirs]) @end example @noindent because this prevents @samp{./configure --help=recursive} from displaying the options of the package @code{foo}. Instead, you should write: @example if test "x$package_foo_enabled" = xyes; then AC_CONFIG_SUBDIRS([foo]) fi @end example If a given @var{dir} is not found at @command{configure} run time, a warning is reported; if the subdirectory is optional, write: @example if test -d "$srcdir/foo"; then AC_CONFIG_SUBDIRS([foo]) fi @end example If a given @var{dir} contains @command{configure.gnu}, it is run instead of @command{configure}. This is for packages that might use a non-Autoconf script @command{Configure}, which can't be called through a wrapper @command{configure} since it would be the same file on case-insensitive file systems. 
The subdirectory @command{configure} scripts are given the same command line options that were given to this @command{configure} script, with minor changes if needed, which include: @itemize @minus @item adjusting a relative name for the cache file; @item adjusting a relative name for the source directory; @item propagating the current value of @code{$prefix}, including if it was defaulted, and if the default values of the top level and of the subdirectory @file{configure} differ. @end itemize This macro also sets the output variable @code{subdirs} to the list of directories @samp{@var{dir} @dots{}}. Make rules can use this variable to determine which subdirectories to recurse into. This macro may be called multiple times. @end defmac @node Default Prefix @section Default Prefix @cindex Install prefix @cindex Prefix for install By default, @command{configure} sets the prefix for files it installs to @file{/usr/local}. The user of @command{configure} can select a different prefix using the @option{--prefix} and @option{--exec-prefix} options. There are two ways to change the default: when creating @command{configure}, and when running it. Some software packages might want to install in a directory other than @file{/usr/local} by default. To accomplish that, use the @code{AC_PREFIX_DEFAULT} macro. @defmac AC_PREFIX_DEFAULT (@var{prefix}) @acindex{PREFIX_DEFAULT} Set the default installation prefix to @var{prefix} instead of @file{/usr/local}. @end defmac It may be convenient for users to have @command{configure} guess the installation prefix from the location of a related program that they have already installed. If you wish to do that, you can call @code{AC_PREFIX_PROGRAM}. @anchor{AC_PREFIX_PROGRAM} @defmac AC_PREFIX_PROGRAM (@var{program}) @acindex{PREFIX_PROGRAM} If the user did not specify an installation prefix (using the @option{--prefix} option), guess a value for it by looking for @var{program} in @env{PATH}, the way the shell does. 
If @var{program} is found, set the prefix to the parent of the directory containing @var{program}, else default the prefix as described above (@file{/usr/local} or @code{AC_PREFIX_DEFAULT}). For example, if @var{program} is @code{gcc} and the @env{PATH} contains @file{/usr/local/gnu/bin/gcc}, set the prefix to @file{/usr/local/gnu}. @end defmac @c ======================================================== Existing tests @node Existing Tests @chapter Existing Tests These macros test for particular system features that packages might need or want to use. If you need to test for a kind of feature that none of these macros check for, you can probably do it by calling primitive test macros with appropriate arguments (@pxref{Writing Tests}). These tests print messages telling the user which feature they're checking for, and what they find. They cache their results for future @command{configure} runs (@pxref{Caching Results}). Some of these macros set output variables. @xref{Makefile Substitutions}, for how to get their values. The phrase ``define @var{name}'' is used below as a shorthand to mean ``define the C preprocessor symbol @var{name} to the value 1''. @xref{Defining Symbols}, for how to get those symbol definitions into your program. 
@menu * Common Behavior:: Macros' standard schemes * Alternative Programs:: Selecting between alternative programs * Files:: Checking for the existence of files * Libraries:: Library archives that might be missing * Library Functions:: C library functions that might be missing * Header Files:: Header files that might be missing * Declarations:: Declarations that may be missing * Structures:: Structures or members that might be missing * Types:: Types that might be missing * Compilers and Preprocessors:: Checking for compiling programs * System Services:: Operating system services * C and Posix Variants:: Kludges for C and Posix variants * Erlang Libraries:: Checking for the existence of Erlang libraries @end menu @node Common Behavior @section Common Behavior @cindex Common autoconf behavior Much effort has been expended to make Autoconf easy to learn. The most obvious way to reach this goal is simply to enforce standard interfaces and behaviors, avoiding exceptions as much as possible. Because of history and inertia, unfortunately, there are still too many exceptions in Autoconf; nevertheless, this section describes some of the common rules. @menu * Standard Symbols:: Symbols defined by the macros * Default Includes:: Includes used by the generic macros @end menu @node Standard Symbols @subsection Standard Symbols @cindex Standard symbols All the generic macros that @code{AC_DEFINE} a symbol as a result of their test transform their @var{argument} values to a standard alphabet. First, @var{argument} is converted to upper case and any asterisks (@samp{*}) are each converted to @samp{P}. Any remaining characters that are not alphanumeric are converted to underscores. For instance, @example AC_CHECK_TYPES([struct $Expensive*]) @end example @noindent defines the symbol @samp{HAVE_STRUCT__EXPENSIVEP} if the check succeeds. 
@node Default Includes @subsection Default Includes @cindex Default includes @cindex Includes, default @hdrindex{assert.h} @hdrindex{ctype.h} @hdrindex{errno.h} @hdrindex{float.h} @hdrindex{iso646.h} @hdrindex{limits.h} @hdrindex{locale.h} @hdrindex{math.h} @hdrindex{setjmp.h} @hdrindex{signal.h} @hdrindex{stdarg.h} @hdrindex{stddef.h} @hdrindex{stdio.h} @hdrindex{stdlib.h} @hdrindex{string.h} @hdrindex{time.h} @hdrindex{wchar.h} @hdrindex{wctype.h} Test programs frequently need to include headers that may or may not be available on the system whose features are being tested. Each test can use all the preprocessor macros that have been @code{AC_DEFINE}d by previous tests, so for example one may write @example @group #include <time.h> #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif @end group @end example @noindent if @file{sys/time.h} has already been tested for. All hosted environments that are still of interest for portable code provide all of the headers specified in ISO C90 (as amended in 1995): @file{assert.h}, @file{ctype.h}, @file{errno.h}, @file{float.h}, @file{iso646.h}, @file{limits.h}, @file{locale.h}, @file{math.h}, @file{setjmp.h}, @file{signal.h}, @file{stdarg.h}, @file{stddef.h}, @file{stdio.h}, @file{stdlib.h}, @file{string.h}, @file{time.h}, @file{wchar.h}, and @file{wctype.h}. Most programs can safely include these headers unconditionally. All other headers, including all headers from later revisions of the C standard, need to be tested for (@pxref{Header Files}). If your program needs to be portable to a @emph{freestanding} environment, such as an embedded OS that doesn't provide all of the facilities of the C90 standard library, you may need to test for some of the above headers as well. Note that many Autoconf macros internally assume that the complete set of C90 headers are available. 
Most generic macros use the following macro to provide a default set of includes: @defmac AC_INCLUDES_DEFAULT (@ovar{include-directives}) @acindex{INCLUDES_DEFAULT} Expand to @var{include-directives} if present and nonempty, otherwise to: @example @group #include <stddef.h> #ifdef HAVE_STDIO_H # include <stdio.h> #endif #ifdef HAVE_STDLIB_H # include <stdlib.h> #endif #ifdef HAVE_STRING_H # include <string.h> #endif #ifdef HAVE_INTTYPES_H # include <inttypes.h> #endif #ifdef HAVE_STDINT_H # include <stdint.h> #endif #ifdef HAVE_STRINGS_H # include <strings.h> #endif #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H # include <sys/stat.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> #endif @end group @end example Using this macro without @var{include-directives} has the side effect of checking for @file{stdio.h}, @file{stdlib.h}, @file{string.h}, @file{inttypes.h}, @file{stdint.h}, @file{strings.h}, @file{sys/types.h}, @file{sys/stat.h}, and @file{unistd.h}, as if by @code{AC_CHECK_HEADERS_ONCE}. For backward compatibility, the macro @code{STDC_HEADERS} will be defined when both @file{stdlib.h} and @file{string.h} are available. @strong{Portability Note:} It is safe for most programs to assume the presence of all of the headers required by the original 1990 C standard. @code{AC_INCLUDES_DEFAULT} checks for @file{stdio.h}, @file{stdlib.h}, and @file{string.h}, even though they are in that list, because they might not be available when compiling for a ``freestanding environment'' (in which most of the features of the C library are optional). You probably do not need to write @samp{#ifdef HAVE_STDIO_H} in your own code. @file{inttypes.h} and @file{stdint.h} were added to C in the 1999 revision of the standard, and @file{strings.h}, @file{sys/types.h}, @file{sys/stat.h}, and @file{unistd.h} are POSIX extensions. You @emph{should} guard uses of these headers with appropriate conditionals. 
@end defmac @defmac AC_CHECK_INCLUDES_DEFAULT @acindex{CHECK_INCLUDES_DEFAULT} Check for all the headers that @code{AC_INCLUDES_DEFAULT} would check for as a side-effect, if this has not already happened. This macro mainly exists so that @code{autoupdate} can replace certain obsolete constructs with it. You should not need to use it yourself; in fact, it is likely to be safe to delete it from any script in which it appears. (@code{autoupdate} does not know whether preprocessor macros such as @code{HAVE_STDINT_H} are used in the program, nor whether they would get defined as a side-effect of other checks.) @end defmac @node Alternative Programs @section Alternative Programs @cindex Programs, checking These macros check for the presence or behavior of particular programs. They are used to choose between several alternative programs and to decide what to do once one has been chosen. If there is no macro specifically defined to check for a program you need, and you don't need to check for any special properties of it, then you can use one of the general program-check macros. @menu * Particular Programs:: Special handling to find certain programs * Generic Programs:: How to find other programs @end menu @node Particular Programs @subsection Particular Program Checks These macros check for particular programs---whether they exist, and in some cases whether they support certain features. @defmac AC_PROG_AWK @acindex{PROG_AWK} @ovindex AWK @caindex prog_AWK Check for @code{gawk}, @code{mawk}, @code{nawk}, and @code{awk}, in that order, and set output variable @code{AWK} to the first one that is found. It tries @code{gawk} first because that is reported to be the best implementation. The result can be overridden by setting the variable @code{AWK} or the cache variable @code{ac_cv_prog_AWK}. Using this macro is sufficient to avoid the pitfalls of traditional @command{awk} (@pxref{awk, , Limitations of Usual Tools}). 
@end defmac @defmac AC_PROG_GREP @acindex{PROG_GREP} @ovindex GREP @caindex prog_GREP Look for the best available @code{grep} or @code{ggrep} that accepts the longest input lines possible, and that supports multiple @option{-e} options. Set the output variable @code{GREP} to whatever is chosen. @xref{grep, , Limitations of Usual Tools}, for more information about portability problems with the @command{grep} command family. The result can be overridden by setting the @code{GREP} variable and is cached in the @code{ac_cv_path_GREP} variable. @end defmac @defmac AC_PROG_EGREP @acindex{PROG_EGREP} @ovindex EGREP @caindex prog_EGREP Check whether @code{$GREP -E} works, or else look for the best available @code{egrep} or @code{gegrep} that accepts the longest input lines possible. Set the output variable @code{EGREP} to whatever is chosen. The result can be overridden by setting the @code{EGREP} variable and is cached in the @code{ac_cv_path_EGREP} variable. @end defmac @defmac AC_PROG_FGREP @acindex{PROG_FGREP} @ovindex FGREP @caindex prog_FGREP Check whether @code{$GREP -F} works, or else look for the best available @code{fgrep} or @code{gfgrep} that accepts the longest input lines possible. Set the output variable @code{FGREP} to whatever is chosen. The result can be overridden by setting the @code{FGREP} variable and is cached in the @code{ac_cv_path_FGREP} variable. @end defmac @defmac AC_PROG_INSTALL @acindex{PROG_INSTALL} @ovindex INSTALL @ovindex INSTALL_PROGRAM @ovindex INSTALL_DATA @ovindex INSTALL_SCRIPT @caindex path_install @prindex @command{install-sh} Set output variable @code{INSTALL} to the name of a BSD-compatible @command{install} program, if one is found in the current @env{PATH}. Otherwise, set @code{INSTALL} to @samp{@var{dir}/install-sh -c}, checking the directories specified to @code{AC_CONFIG_AUX_DIR} (or its default directories) to determine @var{dir} (@pxref{Output}). 
Also set the variables @code{INSTALL_PROGRAM} and @code{INSTALL_SCRIPT} to @samp{$@{INSTALL@}} and @code{INSTALL_DATA} to @samp{$@{INSTALL@} -m 644}. @samp{@@INSTALL@@} is special, as its value may vary for different configuration files. This macro screens out various instances of @command{install} known not to work. It prefers to find a C program rather than a shell script, for speed. Instead of @file{install-sh}, it can also use @file{install.sh}, but that name is obsolete because some @command{make} programs have a rule that creates @file{install} from it if there is no makefile. Further, this macro requires @command{install} to be able to install multiple files into a target directory in a single invocation. Autoconf comes with a copy of @file{install-sh} that you can use. If you use @code{AC_PROG_INSTALL}, you must include @file{install-sh} in your distribution; otherwise @command{autoreconf} and @command{configure} will produce an error message saying they can't find it---even if the system you're on has a good @command{install} program. This check is a safety measure to prevent you from accidentally leaving that file out, which would prevent your package from installing on systems that don't have a BSD-compatible @command{install} program. If you need to use your own installation program because it has features not found in standard @command{install} programs, there is no reason to use @code{AC_PROG_INSTALL}; just put the file name of your program into your @file{Makefile.in} files. The result of the test can be overridden by setting the variable @code{INSTALL} or the cache variable @code{ac_cv_path_install}. 
@end defmac @defmac AC_PROG_MKDIR_P @acindex{PROG_MKDIR_P} @ovindex MKDIR_P @caindex path_mkdir @prindex @command{install-sh} Set output variable @code{MKDIR_P} to a program that ensures that for each argument, a directory named by this argument exists, creating it and its parent directories if needed, and without race conditions when two instances of the program attempt to make the same directory at nearly the same time. This macro uses the @samp{mkdir -p} command if possible. Otherwise, it falls back on invoking @command{install-sh} with the @option{-d} option, so your package should contain @file{install-sh} as described under @code{AC_PROG_INSTALL}. An @file{install-sh} file that predates Autoconf 2.60 or Automake 1.10 is vulnerable to race conditions, so if you want to support parallel installs from different packages into the same directory you need to make sure you have an up-to-date @file{install-sh}. In particular, be careful about using @samp{autoreconf -if} if your Automake predates Automake 1.10. This macro is related to the @code{AS_MKDIR_P} macro (@pxref{Programming in M4sh}), but it sets an output variable intended for use in other files, whereas @code{AS_MKDIR_P} is intended for use in scripts like @command{configure}. Also, @code{AS_MKDIR_P} does not accept options, but @code{MKDIR_P} supports the @option{-m} option, e.g., a makefile might invoke @code{$(MKDIR_P) -m 0 dir} to create an inaccessible directory, and conversely a makefile should use @code{$(MKDIR_P) -- $(FOO)} if @var{FOO} might yield a value that begins with @samp{-}. Finally, @code{AS_MKDIR_P} does not check for race condition vulnerability, whereas @code{AC_PROG_MKDIR_P} does. @samp{@@MKDIR_P@@} is special, as its value may vary for different configuration files. The result of the test can be overridden by setting the variable @code{MKDIR_P} or the cache variable @code{ac_cv_path_mkdir}. 
@end defmac @anchor{AC_PROG_LEX} @defmac AC_PROG_LEX (@var{options}) @acindex{PROG_LEX} @ovindex LEX @ovindex LEXLIB @cvindex YYTEXT_POINTER @ovindex LEX_OUTPUT_ROOT @caindex prog_LEX Search for a lexical analyzer generator, preferring @code{flex} to plain @code{lex}. Output variable @code{LEX} is set to whichever program is available. If neither program is available, @code{LEX} is set to @samp{:}; for packages that ship the generated @file{file.yy.c} alongside the source @file{file.l}, this default allows users without a lexer generator to still build the package even if the timestamp for @file{file.l} is inadvertently changed. The name of the program to use can be overridden by setting the output variable @code{LEX} or the cache variable @code{ac_cv_prog_LEX} when running @command{configure}. If a lexical analyzer generator is found, this macro performs additional checks for common portability pitfalls. If these additional checks fail, @code{LEX} is reset to @samp{:}; otherwise the following additional macros and variables are provided. Preprocessor macro @code{YYTEXT_POINTER} is defined if the lexer skeleton, by default, declares @code{yytext} as a @samp{@w{char *}} rather than a @samp{@w{char []}}. Output variable @code{LEX_OUTPUT_ROOT} is set to the base of the file name that the lexer generates; this is usually either @file{lex.yy} or @file{lexyy}. If generated lexers need a library to work, output variable @code{LEXLIB} is set to a link option for that library (e.g., @option{-ll}), otherwise it is set to empty. The @var{options} argument modifies the behavior of @code{AC_PROG_LEX}. It should be a whitespace-separated list of options. Currently there are only two options, and they are mutually exclusive: @table @code @item yywrap Indicate that the library in @code{LEXLIB} needs to define the function @code{yywrap}. If a library that defines this function cannot be found, @code{LEX} will be reset to @samp{:}. 
@item noyywrap Indicate that the library in @code{LEXLIB} does not need to define the function @code{yywrap}. @command{configure} will not search for it at all. @end table Prior to Autoconf 2.70, @code{AC_PROG_LEX} did not take any arguments, and its behavior was different from either of the above possibilities: it would search for a library that defines @code{yywrap}, and would set @code{LEXLIB} to that library if it found one. However, if a library that defines this function could not be found, @code{LEXLIB} would be left empty and @code{LEX} would @emph{not} be reset. This behavior was due to a bug, but several packages came to depend on it, so @code{AC_PROG_LEX} still does this if neither the @code{yywrap} nor the @code{noyywrap} option is given. Usage of @code{AC_PROG_LEX} without choosing one of the @code{yywrap} or @code{noyywrap} options is deprecated. It is usually better to use @code{noyywrap} and define the @code{yywrap} function yourself, as this almost always renders the @code{LEXLIB} unnecessary. @strong{Caution:} As a side-effect of the test, this macro may delete any file in the configure script's current working directory named @file{lex.yy.c} or @file{lexyy.c}. @strong{Caution:} Packages that ship a generated @file{lex.yy.c} cannot assume that the definition of @code{YYTEXT_POINTER} matches the code in that file. They also cannot assume that @code{LEXLIB} provides the library routines required by the code in that file. If you use Flex to generate @file{lex.yy.c}, you can work around these limitations by defining @code{yywrap} and @code{main} yourself (rendering @code{-lfl} unnecessary), and by using either the @option{--array} or @option{--pointer} options to control how @code{yytext} is declared. The code generated by Flex is also more portable than the code generated by historical versions of Lex. 
If you have used Flex to generate @file{lex.yy.c}, and especially if your scanner depends on Flex features, we recommend you use this Autoconf snippet to prevent the scanner being regenerated with historical Lex: @example AC_PROG_LEX if test "x$LEX" != xflex; then LEX="$SHELL $missing_dir/missing flex" AC_SUBST([LEX_OUTPUT_ROOT], [lex.yy]) AC_SUBST([LEXLIB], ['']) fi @end example The shell script @command{missing} can be found in the Automake distribution. Remember that the user may have supplied an alternate location in @env{LEX}, so if Flex is required, it is better to check that the user provided something sufficient by parsing the output of @samp{$LEX --version} than by simply relying on @code{test "x$LEX" = xflex}. @end defmac @anchor{AC_PROG_LN_S} @defmac AC_PROG_LN_S @acindex{PROG_LN_S} @ovindex LN_S If @samp{ln -s} works on the current file system (the operating system and file system support symbolic links), set the output variable @code{LN_S} to @samp{ln -s}; otherwise, if @samp{ln} works, set @code{LN_S} to @samp{ln}, and otherwise set it to @samp{cp -pR}. If you make a link in a directory other than the current directory, its meaning depends on whether @samp{ln} or @samp{ln -s} is used. To safely create links using @samp{$(LN_S)}, either find out which form is used and adjust the arguments, or always invoke @code{ln} in the directory where the link is to be created. In other words, it does not work to do: @example $(LN_S) foo /x/bar @end example Instead, do: @example (cd /x && $(LN_S) foo bar) @end example @end defmac @defmac AC_PROG_RANLIB @acindex{PROG_RANLIB} @ovindex RANLIB @c @caindex prog_RANLIB @c @caindex prog_ac_ct_RANLIB Set output variable @code{RANLIB} to @samp{ranlib} if @code{ranlib} is found, and otherwise to @samp{:} (do nothing). @end defmac @defmac AC_PROG_SED @acindex{PROG_SED} @ovindex SED @caindex path_SED Set output variable @code{SED} to a Sed implementation that conforms to Posix and does not have arbitrary length limits. 
Report an error if no acceptable Sed is found. @xref{sed, , Limitations of Usual Tools}, for more information about portability problems with Sed. The result of this test can be overridden by setting the @code{SED} variable and is cached in the @code{ac_cv_path_SED} variable. @end defmac @defmac AC_PROG_YACC @acindex{PROG_YACC} @evindex YACC @evindex YFLAGS @ovindex YACC @caindex prog_YACC If @code{bison} is found, set output variable @code{YACC} to @samp{bison -y}. Otherwise, if @code{byacc} is found, set @code{YACC} to @samp{byacc}. Otherwise set @code{YACC} to @samp{yacc}. The result of this test can be influenced by setting the variable @code{YACC} or the cache variable @code{ac_cv_prog_YACC}. @end defmac @node Generic Programs @subsection Generic Program and File Checks These macros are used to find programs not covered by the ``particular'' test macros. If you need to check the behavior of a program as well as find out whether it is present, you have to write your own test for it (@pxref{Writing Tests}). By default, these macros use the environment variable @env{PATH}. If you need to check for a program that might not be in the user's @env{PATH}, you can pass a modified path to use instead, like this: @example AC_PATH_PROG([INETD], [inetd], [/usr/libexec/inetd], [$PATH$PATH_SEPARATOR/usr/libexec$PATH_SEPARATOR]dnl [/usr/sbin$PATH_SEPARATOR/usr/etc$PATH_SEPARATOR/etc]) @end example You are strongly encouraged to declare the @var{variable} passed to @code{AC_CHECK_PROG} etc.@: as precious. @xref{Setting Output Variables}, @code{AC_ARG_VAR}, for more details. @anchor{AC_CHECK_PROG} @defmac AC_CHECK_PROG (@var{variable}, @var{prog-to-check-for}, @ @var{value-if-found}, @ovar{value-if-not-found}, @dvar{path, $PATH}, @ @ovar{reject}) @acindex{CHECK_PROG} @caindex prog_@var{variable} Check whether program @var{prog-to-check-for} exists in @var{path}. If it is found, set @var{variable} to @var{value-if-found}, otherwise to @var{value-if-not-found}, if given. 
Always pass over @var{reject} (an absolute file name) even if it is the first found in the search path; in that case, set @var{variable} using the absolute file name of the @var{prog-to-check-for} found that is not @var{reject}. If @var{variable} was already set, do nothing. Calls @code{AC_SUBST} for @var{variable}. The result of this test can be overridden by setting the @var{variable} variable or the cache variable @code{ac_cv_prog_@var{variable}}. @end defmac @anchor{AC_CHECK_PROGS} @defmac AC_CHECK_PROGS (@var{variable}, @var{progs-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{CHECK_PROGS} @caindex prog_@var{variable} Check for each program in the blank-separated list @var{progs-to-check-for} existing in the @var{path}. If one is found, set @var{variable} to the name of that program. Otherwise, continue checking the next program in the list. If none of the programs in the list are found, set @var{variable} to @var{value-if-not-found}; if @var{value-if-not-found} is not specified, the value of @var{variable} is not changed. Calls @code{AC_SUBST} for @var{variable}. The result of this test can be overridden by setting the @var{variable} variable or the cache variable @code{ac_cv_prog_@var{variable}}. @end defmac @defmac AC_CHECK_TARGET_TOOL (@var{variable}, @var{prog-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{CHECK_TARGET_TOOL} Like @code{AC_CHECK_PROG}, but first looks for @var{prog-to-check-for} with a prefix of the target type as determined by @code{AC_CANONICAL_TARGET}, followed by a dash (@pxref{Canonicalizing}). If the tool cannot be found with a prefix, and if the build and target types are equal, then it is also searched for without a prefix. As noted in @ref{Specifying Target Triplets}, the target is rarely specified, because most of the time it is the same as the host: it is the type of system for which any compiler tool in the package produces code. 
What this macro looks for is, for example, @emph{a tool @r{(assembler, linker, etc.)}@: that the compiler driver @r{(@command{gcc} for the GNU C Compiler)} uses to produce objects, archives or executables}. @end defmac @defmac AC_CHECK_TOOL (@var{variable}, @var{prog-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{CHECK_TOOL} @c @caindex prog_@var{VARIABLE} @c @caindex prog_ac_ct_@var{VARIABLE} Like @code{AC_CHECK_PROG}, but first looks for @var{prog-to-check-for} with a prefix of the host type as specified by @option{--host}, followed by a dash. For example, if the user runs @samp{configure --build=x86_64-gnu --host=aarch64-linux-gnu}, then this call: @example AC_CHECK_TOOL([RANLIB], [ranlib], [:]) @end example @noindent sets @code{RANLIB} to @file{aarch64-linux-gnu-ranlib} if that program exists in @var{path}, or otherwise to @samp{ranlib} if that program exists in @var{path}, or to @samp{:} if neither program exists. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see @ref{Specifying Target Triplets}. @end defmac @defmac AC_CHECK_TARGET_TOOLS (@var{variable}, @var{progs-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{CHECK_TARGET_TOOLS} Like @code{AC_CHECK_TARGET_TOOL}, each of the tools in the list @var{progs-to-check-for} is checked with a prefix of the target type as determined by @code{AC_CANONICAL_TARGET}, followed by a dash (@pxref{Canonicalizing}). If none of the tools can be found with a prefix, and if the build and target types are equal, then the first one without a prefix is used. If a tool is found, set @var{variable} to the name of that program. If none of the tools in the list are found, set @var{variable} to @var{value-if-not-found}; if @var{value-if-not-found} is not specified, the value of @var{variable} is not changed. Calls @code{AC_SUBST} for @var{variable}. 
@end defmac @defmac AC_CHECK_TOOLS (@var{variable}, @var{progs-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{CHECK_TOOLS} Like @code{AC_CHECK_TOOL}, each of the tools in the list @var{progs-to-check-for} is checked with a prefix of the host type as determined by @code{AC_CANONICAL_HOST}, followed by a dash (@pxref{Canonicalizing}). If none of the tools can be found with a prefix, then the first one without a prefix is used. If a tool is found, set @var{variable} to the name of that program. If none of the tools in the list are found, set @var{variable} to @var{value-if-not-found}; if @var{value-if-not-found} is not specified, the value of @var{variable} is not changed. Calls @code{AC_SUBST} for @var{variable}. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see @ref{Specifying Target Triplets}. @end defmac @anchor{AC_PATH_PROG} @defmac AC_PATH_PROG (@var{variable}, @var{prog-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{PATH_PROG} @caindex path_@var{variable} Like @code{AC_CHECK_PROG}, but set @var{variable} to the absolute name of @var{prog-to-check-for} if found. The result of this test can be overridden by setting the @var{variable} variable. A positive result of this test is cached in the @code{ac_cv_path_@var{variable}} variable. @end defmac @anchor{AC_PATH_PROGS} @defmac AC_PATH_PROGS (@var{variable}, @var{progs-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{PATH_PROGS} @caindex path_@var{variable} Like @code{AC_CHECK_PROGS}, but if any of @var{progs-to-check-for} are found, set @var{variable} to the absolute name of the program found. The result of this test can be overridden by setting the @var{variable} variable. A positive result of this test is cached in the @code{ac_cv_path_@var{variable}} variable. 
@end defmac @defmac AC_PATH_PROGS_FEATURE_CHECK (@var{variable}, @ @var{progs-to-check-for}, @var{feature-test}, @ @ovar{action-if-not-found}, @dvar{path, $PATH}) @acindex{PATH_PROGS_FEATURE_CHECK} @caindex path_@var{variable} @vrindex ac_path_@var{variable} @vrindex ac_path_@var{variable}_found This macro was introduced in Autoconf 2.62. If @var{variable} is not empty, then set the cache variable @code{ac_cv_path_@var{variable}} to its value. Otherwise, check for each program in the blank-separated list @var{progs-to-check-for} existing in @var{path}. For each program found, execute @var{feature-test} with @code{ac_path_@var{variable}} set to the absolute name of the candidate program. If no invocation of @var{feature-test} sets the shell variable @code{ac_cv_path_@var{variable}}, then @var{action-if-not-found} is executed. @var{feature-test} will be run even when @code{ac_cv_path_@var{variable}} is set, to provide the ability to choose a better candidate found later in @var{path}; to accept the current setting and bypass all further checks, @var{feature-test} can execute @code{ac_path_@var{variable}_found=:}. Note that this macro has some subtle differences from @code{AC_CHECK_PROGS}. It is designed to be run inside @code{AC_CACHE_VAL}, therefore, it should have no side effects. In particular, @var{variable} is not set to the final value of @code{ac_cv_path_@var{variable}}, nor is @code{AC_SUBST} automatically run. Also, on failure, any action can be performed, whereas @code{AC_CHECK_PROGS} only performs @code{@var{variable}=@var{value-if-not-found}}. Here is an example, similar to what Autoconf uses in its own configure script. It will search for an implementation of @command{m4} that supports the @code{indir} builtin, even if it goes by the name @command{gm4} or is not the first implementation on @env{PATH}. 
@example AC_CACHE_CHECK([for m4 that supports indir], [ac_cv_path_M4], [AC_PATH_PROGS_FEATURE_CHECK([M4], [m4 gm4], [[m4out=`echo 'changequote([,])indir([divnum])' | $ac_path_M4` test "x$m4out" = x0 \ && ac_cv_path_M4=$ac_path_M4 ac_path_M4_found=:]], [AC_MSG_ERROR([could not find m4 that supports indir])])]) AC_SUBST([M4], [$ac_cv_path_M4]) @end example @end defmac @defmac AC_PATH_TARGET_TOOL (@var{variable}, @var{prog-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{PATH_TARGET_TOOL} Like @code{AC_CHECK_TARGET_TOOL}, but set @var{variable} to the absolute name of the program if it is found. @end defmac @defmac AC_PATH_TOOL (@var{variable}, @var{prog-to-check-for}, @ @ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{PATH_TOOL} Like @code{AC_CHECK_TOOL}, but set @var{variable} to the absolute name of the program if it is found. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see @ref{Specifying Target Triplets}. @end defmac @node Files @section Files @cindex File, checking You might also need to check for the existence of files. Before using these macros, ask yourself whether a runtime test might not be a better solution. Be aware that, like most Autoconf macros, they test a feature of the host machine, and therefore, they die when cross-compiling. @defmac AC_CHECK_FILE (@var{file}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{CHECK_FILE} @caindex file_@var{file} Check whether file @var{file} exists on the native system. If it is found, execute @var{action-if-found}, otherwise do @var{action-if-not-found}, if given. Cache the result of this test in the @code{ac_cv_file_@var{file}} variable, with characters not suitable for a variable name mapped to underscores. 
@end defmac @defmac AC_CHECK_FILES (@var{files}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{CHECK_FILES} @caindex file_@var{file} For each file listed in @var{files}, execute @code{AC_CHECK_FILE} and perform either @var{action-if-found} or @var{action-if-not-found}. Like @code{AC_CHECK_FILE}, this defines @samp{HAVE_@var{file}} (@pxref{Standard Symbols}) for each file found and caches the results of each test in the @code{ac_cv_file_@var{file}} variable, with characters not suitable for a variable name mapped to underscores. @end defmac @node Libraries @section Library Files @cindex Library, checking The following macros check for the presence of certain C, C++, Fortran, or Go library archive files. @anchor{AC_CHECK_LIB} @defmac AC_CHECK_LIB (@var{library}, @var{function}, @ @ovar{action-if-found}, @ovar{action-if-not-found}, @ovar{other-libraries}) @acindex{CHECK_LIB} @caindex lib_@var{library}_@var{function} Test whether the library @var{library} is available by trying to link a test program that calls function @var{function} with the library. @var{function} should be a function provided by the library. Use the base name of the library; e.g., to check for @option{-lmp}, use @samp{mp} as the @var{library} argument. @var{action-if-found} is a list of shell commands to run if the link with the library succeeds; @var{action-if-not-found} is a list of shell commands to run if the link fails. If @var{action-if-found} is not specified, the default action prepends @option{-l@var{library}} to @code{LIBS} and defines @samp{HAVE_LIB@var{library}} (in all capitals). This macro is intended to support building @code{LIBS} in a right-to-left (least-dependent to most-dependent) fashion such that library dependencies are satisfied as a natural side effect of consecutive tests. Linkers are sensitive to library ordering so the order in which @code{LIBS} is generated is important to reliable detection of libraries. 
If linking with @var{library} results in unresolved symbols that would be resolved by linking with additional libraries, give those libraries as the @var{other-libraries} argument, separated by spaces: e.g., @option{-lXt -lX11}. Otherwise, this macro may fail to detect that @var{library} is present, because linking the test program can fail with unresolved symbols. The @var{other-libraries} argument should be limited to cases where it is desirable to test for one library in the presence of another that is not already in @code{LIBS}. @code{AC_CHECK_LIB} requires some care in usage, and should be avoided in some common cases. Many standard functions like @code{gethostbyname} appear in the standard C library on some hosts, and in special libraries like @code{nsl} on other hosts. On some hosts the special libraries contain variant implementations that you may not want to use. These days it is normally better to use @code{AC_SEARCH_LIBS([gethostbyname], [nsl])} instead of @code{AC_CHECK_LIB([nsl], [gethostbyname])}. The result of this test is cached in the @code{ac_cv_lib_@var{library}_@var{function}} variable. @end defmac @anchor{AC_SEARCH_LIBS} @defmac AC_SEARCH_LIBS (@var{function}, @var{search-libs}, @ @ovar{action-if-found}, @ovar{action-if-not-found}, @ovar{other-libraries}) @acindex{SEARCH_LIBS} @caindex search_@var{function} Search for a library defining @var{function} if it's not already available. This equates to calling @samp{AC_LINK_IFELSE([AC_LANG_CALL([], [@var{function}])])} first with no libraries, then for each library listed in @var{search-libs}. Prepend @option{-l@var{library}} to @code{LIBS} for the first library found to contain @var{function}, and run @var{action-if-found}. If the function is not found, run @var{action-if-not-found}. 
If linking with @var{library} results in unresolved symbols that would be resolved by linking with additional libraries, give those libraries as the @var{other-libraries} argument, separated by spaces: e.g., @option{-lXt -lX11}. Otherwise, this macro fails to detect that @var{function} is present, because linking the test program always fails with unresolved symbols. The result of this test is cached in the @code{ac_cv_search_@var{function}} variable as @samp{none required} if @var{function} is already available, as @samp{no} if no library containing @var{function} was found, otherwise as the @option{-l@var{library}} option that needs to be prepended to @code{LIBS}. @end defmac @node Library Functions @section Library Functions The following macros check for particular C library functions. If there is no macro specifically defined to check for a function you need, and you don't need to check for any special properties of it, then you can use one of the general function-check macros. @menu * Function Portability:: Pitfalls with usual functions * Particular Functions:: Special handling to find certain functions * Generic Functions:: How to find other functions @end menu @node Function Portability @subsection Portability of C Functions @cindex Portability of C functions @cindex C function portability Most usual functions can either be missing, or be buggy, or be limited on some architectures. This section tries to make an inventory of these portability issues. By definition, this list always requires additions. A much more complete list is maintained by the Gnulib project (@pxref{Gnulib}), covering @ref{Function Substitutes, , Current Posix Functions, gnulib, Gnulib}, @ref{Legacy Function Substitutes, , Legacy Functions, gnulib, Gnulib}, and @ref{Glibc Function Substitutes, , Glibc Functions, gnulib, Gnulib}. Please help us keep the Gnulib list as complete as possible. 
@table @asis @item @code{exit} @c @fuindex exit @prindex @code{exit} On ancient hosts, @code{exit} returned @code{int}. This is because @code{exit} predates @code{void}, and there was a long tradition of it returning @code{int}. On current hosts, the problem more likely is that @code{exit} is not declared, due to C++ problems of some sort or another. For this reason we suggest that test programs not invoke @code{exit}, but return from @code{main} instead. @item @code{free} @c @fuindex free @prindex @code{free} The C standard says a call @code{free (NULL)} does nothing, but some old systems don't support this (e.g., NextStep). @item @code{isinf} @itemx @code{isnan} @c @fuindex isinf @c @fuindex isnan @prindex @code{isinf} @prindex @code{isnan} In C99 and later, @code{isinf} and @code{isnan} are macros. On some systems just macros are available (e.g., HP-UX and Solaris 10), on some systems both macros and functions (e.g., glibc 2.3.2), and on some systems only functions (e.g., IRIX 6 and Solaris 9). In some cases these functions are declared in nonstandard headers like @code{<sunmath.h>} and defined in non-default libraries like @option{-lm} or @option{-lsunmath}. In C99 and later, @code{isinf} and @code{isnan} macros work correctly with @code{long double} arguments, but pre-C99 systems that use functions typically assume @code{double} arguments. On such a system, @code{isinf} incorrectly returns true for a finite @code{long double} argument that is outside the range of @code{double}. The best workaround for these issues is to use Gnulib modules @code{isinf} and @code{isnan} (@pxref{Gnulib}). But a lighter weight solution involves code like the following. @smallexample #include <math.h> #ifndef isnan # define isnan(x) \ (sizeof (x) == sizeof (long double) ? isnan_ld (x) \ : sizeof (x) == sizeof (double) ? 
isnan_d (x) \ : isnan_f (x)) static int isnan_f (float x) @{ return x != x; @} static int isnan_d (double x) @{ return x != x; @} static int isnan_ld (long double x) @{ return x != x; @} #endif #ifndef isinf # define isinf(x) \ (sizeof (x) == sizeof (long double) ? isinf_ld (x) \ : sizeof (x) == sizeof (double) ? isinf_d (x) \ : isinf_f (x)) static int isinf_f (float x) @{ return !isnan (x) && isnan (x - x); @} static int isinf_d (double x) @{ return !isnan (x) && isnan (x - x); @} static int isinf_ld (long double x) @{ return !isnan (x) && isnan (x - x); @} #endif @end smallexample Some optimizing compilers mishandle these definitions, but systems with that bug typically have many other floating point corner-case compliance problems anyway, so it's probably not worth worrying about. @item @code{malloc} @c @fuindex malloc @prindex @code{malloc} The C standard says a call @code{malloc (0)} is implementation dependent. It can return either @code{NULL} or a new non-null pointer. The latter is more common (e.g., the GNU C Library) but is by no means universal. @code{AC_FUNC_MALLOC} can be used to insist on non-@code{NULL} (@pxref{Particular Functions}). @item @code{putenv} @c @fuindex putenv @prindex @code{putenv} Posix prefers @code{setenv} to @code{putenv}; among other things, @code{putenv} is not required of all Posix implementations, but @code{setenv} is. Posix specifies that @code{putenv} puts the given string directly in @code{environ}, but some systems make a copy of it instead (e.g., glibc 2.0, or BSD). And when a copy is made, @code{unsetenv} might not free it, causing a memory leak (e.g., FreeBSD 4). On some systems @code{putenv ("FOO")} removes @samp{FOO} from the environment, but this is not standard usage and it dumps core on some systems (e.g., AIX). On MinGW, a call @code{putenv ("FOO=")} removes @samp{FOO} from the environment, rather than inserting it with an empty value. 
@item @code{realloc} @c @fuindex realloc @prindex @code{realloc} The C standard says a call @code{realloc (NULL, size)} is equivalent to @code{malloc (size)}, but some old systems don't support this (e.g., NextStep). @item @code{signal} handler @c @fuindex signal @prindex @code{signal} @prindex @code{sigaction} Normally @code{signal} takes a handler function with a return type of @code{void}, but some old systems required @code{int} instead. Any actual @code{int} value returned is not used; this is only a difference in the function prototype demanded. All systems we know of in current use return @code{void}. The @code{int} was to support K&R C, where of course @code{void} is not available. The obsolete macro @code{AC_TYPE_SIGNAL} (@pxref{AC_TYPE_SIGNAL}) can be used to establish the correct type in all cases. In most cases, it is more robust to use @code{sigaction} when it is available, rather than @code{signal}. @item @code{snprintf} @c @fuindex snprintf @prindex @code{snprintf} @c @fuindex vsnprintf @prindex @code{vsnprintf} In C99 and later, if the output array isn't big enough and if no other errors occur, @code{snprintf} and @code{vsnprintf} truncate the output and return the number of bytes that ought to have been produced. Some older systems return the truncated length (e.g., GNU C Library 2.0.x or IRIX 6.5), some a negative value (e.g., earlier GNU C Library versions), and some the buffer length without truncation (e.g., 32-bit Solaris 7). Also, some buggy older systems ignore the length and overrun the buffer (e.g., 64-bit Solaris 7). @item @code{sprintf} @c @fuindex sprintf @prindex @code{sprintf} @c @fuindex vsprintf @prindex @code{vsprintf} The C standard says @code{sprintf} and @code{vsprintf} return the number of bytes written. On some ancient systems (SunOS 4 for instance) they return the buffer pointer instead, but these no longer need to be worried about. 
@item @code{sscanf} @c @fuindex sscanf @prindex @code{sscanf} On various old systems, e.g., HP-UX 9, @code{sscanf} requires that its input string be writable (though it doesn't actually change it). This can be a problem when using @command{gcc} since it normally puts constant strings in read-only memory (@pxref{Incompatibilities, Incompatibilities of GCC, , gcc, Using and Porting the GNU Compiler Collection}). Apparently in some cases even having format strings read-only can be a problem. @item @code{strerror_r} @c @fuindex strerror_r @prindex @code{strerror_r} Posix specifies that @code{strerror_r} returns an @code{int}, but many systems (e.g., GNU C Library version 2.2.4) provide a different version returning a @code{char *}. @code{AC_FUNC_STRERROR_R} can detect which is in use (@pxref{Particular Functions}). @item @code{strnlen} @c @fuindex strnlen @prindex @code{strnlen} AIX 4.3 provides a broken version which produces the following results: @example strnlen ("foobar", 0) = 0 strnlen ("foobar", 1) = 3 strnlen ("foobar", 2) = 2 strnlen ("foobar", 3) = 1 strnlen ("foobar", 4) = 0 strnlen ("foobar", 5) = 6 strnlen ("foobar", 6) = 6 strnlen ("foobar", 7) = 6 strnlen ("foobar", 8) = 6 strnlen ("foobar", 9) = 6 @end example @item @code{sysconf} @c @fuindex sysconf @prindex @code{sysconf} @code{_SC_PAGESIZE} is standard, but some older systems (e.g., HP-UX 9) have @code{_SC_PAGE_SIZE} instead. This can be tested with @code{#ifdef}. @item @code{unlink} @c @fuindex unlink @prindex @code{unlink} The Posix spec says that @code{unlink} causes the given file to be removed only after there are no more open file handles for it. Some non-Posix hosts have trouble with this requirement, though, and some DOS variants even corrupt the file system. @item @code{unsetenv} @c @fuindex unsetenv @prindex @code{unsetenv} On MinGW, @code{unsetenv} is not available, but a variable @samp{FOO} can be removed with a call @code{putenv ("FOO=")}, as described under @code{putenv} above. 
@item @code{va_copy} @c @fuindex va_copy @prindex @code{va_copy} C99 and later provide @code{va_copy} for copying @code{va_list} variables. It may be available in older environments too, though possibly as @code{__va_copy} (e.g., @command{gcc} in strict pre-C99 mode). These can be tested with @code{#ifdef}. A fallback to @code{memcpy (&dst, &src, sizeof (va_list))} gives maximum portability. @item @code{va_list} @c @fuindex va_list @prindex @code{va_list} @code{va_list} is not necessarily just a pointer. It can be a @code{struct} (e.g., @command{gcc} on Alpha), which means @code{NULL} is not portable. Or it can be an array (e.g., @command{gcc} in some PowerPC configurations), which means as a function parameter it can be effectively call-by-reference and library routines might modify the value back in the caller (e.g., @code{vsnprintf} in the GNU C Library 2.1). @item Signed @code{>>} Normally the C @code{>>} right shift of a signed type replicates the high bit, giving a so-called ``arithmetic'' shift. But care should be taken since Standard C doesn't require that behavior. On those few processors without a native arithmetic shift (for instance Cray vector systems) zero bits may be shifted in, the same as a shift of an unsigned type. @item Integer @code{/} C divides signed integers by truncating their quotient toward zero, yielding the same result as Fortran. However, before C99 the standard allowed C implementations to take the floor or ceiling of the quotient in some cases. Hardly any implementations took advantage of this freedom, though, and it's probably not worth worrying about this issue nowadays. @end table @node Particular Functions @subsection Particular Function Checks @cindex Function, checking These macros check for particular C functions---whether they exist, and in some cases how they respond when given certain arguments. 
@anchor{AC_FUNC_ALLOCA} @defmac AC_FUNC_ALLOCA @acindex{FUNC_ALLOCA} @cvindex C_ALLOCA @cvindex HAVE_ALLOCA_H @ovindex ALLOCA @c @fuindex alloca @prindex @code{alloca} @hdrindex{alloca.h} @c @caindex working_alloca_h Check for the @code{alloca} function. Define @code{HAVE_ALLOCA_H} if @file{alloca.h} defines a working @code{alloca}. If not, look for a builtin alternative. If either method succeeds, define @code{HAVE_ALLOCA}. Otherwise, set the output variable @code{ALLOCA} to @samp{$@{LIBOBJDIR@}alloca.o} and define @code{C_ALLOCA} (so programs can periodically call @samp{alloca (0)} to garbage collect). This variable is separate from @code{LIBOBJS} so multiple programs can share the value of @code{ALLOCA} without needing to create an actual library, in case only some of them use the code in @code{LIBOBJS}. The @samp{$@{LIBOBJDIR@}} prefix serves the same purpose as in @code{LIBOBJS} (@pxref{AC_LIBOBJ vs LIBOBJS}). Source files that use @code{alloca} should start with a piece of code like the following, to declare it properly. @example @group #include <stdlib.h> #include <stddef.h> #ifdef HAVE_ALLOCA_H # include <alloca.h> #elif !defined alloca # ifdef __GNUC__ # define alloca __builtin_alloca # elif defined _MSC_VER # include <malloc.h> # define alloca _alloca # elif !defined HAVE_ALLOCA # ifdef __cplusplus extern "C" # endif void *alloca (size_t); # endif #endif @end group @end example If you don't want to maintain this piece of code in your package manually, you can instead use the Gnulib module @code{alloca-opt} or @code{alloca}. @xref{Gnulib}. @end defmac @defmac AC_FUNC_CHOWN @acindex{FUNC_CHOWN} @cvindex HAVE_CHOWN @c @fuindex chown @prindex @code{chown} @caindex func_chown_works If the @code{chown} function is available and works (in particular, it should accept @option{-1} for @code{uid} and @code{gid}), define @code{HAVE_CHOWN}. The result of this macro is cached in the @code{ac_cv_func_chown_works} variable. 
If you want a workaround, that is, a @code{chown} function that is available and works, you can use the Gnulib module @code{chown}. @xref{Gnulib}. @end defmac @anchor{AC_FUNC_CLOSEDIR_VOID} @defmac AC_FUNC_CLOSEDIR_VOID @acindex{FUNC_CLOSEDIR_VOID} @cvindex CLOSEDIR_VOID @c @fuindex closedir @prindex @code{closedir} @caindex func_closedir_void If the @code{closedir} function does not return a meaningful value, define @code{CLOSEDIR_VOID}. Otherwise, callers ought to check its return value for an error indicator. Currently this test is implemented by running a test program. When cross compiling the pessimistic assumption that @code{closedir} does not return a meaningful value is made. The result of this macro is cached in the @code{ac_cv_func_closedir_void} variable. This macro is obsolescent, as @code{closedir} returns a meaningful value on current systems. New programs need not use this macro. @end defmac @defmac AC_FUNC_ERROR_AT_LINE @acindex{FUNC_ERROR_AT_LINE} @c @fuindex error_at_line @prindex @code{error_at_line} @caindex lib_error_at_line If the @code{error_at_line} function is not found, require an @code{AC_LIBOBJ} replacement of @samp{error}. The result of this macro is cached in the @code{ac_cv_lib_error_at_line} variable. The @code{AC_FUNC_ERROR_AT_LINE} macro is obsolescent. New programs should use Gnulib's @code{error} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_FNMATCH @acindex{FUNC_FNMATCH} @c @fuindex fnmatch @prindex @code{fnmatch} @caindex func_fnmatch_works If the @code{fnmatch} function conforms to Posix, define @code{HAVE_FNMATCH}. Detect common implementation bugs, for example, the bugs in Solaris 2.4. Unlike the other specific @code{AC_FUNC} macros, @code{AC_FUNC_FNMATCH} does not replace a broken/missing @code{fnmatch}. This is for historical reasons. See @code{AC_REPLACE_FNMATCH} below. The result of this macro is cached in the @code{ac_cv_func_fnmatch_works} variable. This macro is obsolescent. 
New programs should use Gnulib's @code{fnmatch-posix} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_FNMATCH_GNU @acindex{FUNC_FNMATCH_GNU} @c @fuindex fnmatch @prindex @code{fnmatch} @caindex func_fnmatch_gnu Behave like @code{AC_REPLACE_FNMATCH} (@emph{replace}) but also test whether @code{fnmatch} supports GNU extensions. Detect common implementation bugs, for example, the bugs in the GNU C Library 2.1. The result of this macro is cached in the @code{ac_cv_func_fnmatch_gnu} variable. This macro is obsolescent. New programs should use Gnulib's @code{fnmatch-gnu} module. @xref{Gnulib}. @end defmac @anchor{AC_FUNC_FORK} @defmac AC_FUNC_FORK @acindex{FUNC_FORK} @cvindex HAVE_VFORK_H @cvindex HAVE_WORKING_FORK @cvindex HAVE_WORKING_VFORK @cvindex vfork @c @fuindex fork @prindex @code{fork} @c @fuindex vfork @prindex @code{vfork} @hdrindex{vfork.h} @c @caindex func_fork @c @caindex func_fork_works This macro checks for the @code{fork} and @code{vfork} functions. If a working @code{fork} is found, define @code{HAVE_WORKING_FORK}. This macro checks whether @code{fork} is just a stub by trying to run it. If @file{vfork.h} is found, define @code{HAVE_VFORK_H}. If a working @code{vfork} is found, define @code{HAVE_WORKING_VFORK}. Otherwise, define @code{vfork} to be @code{fork} for backward compatibility with previous versions of @command{autoconf}. This macro checks for several known errors in implementations of @code{vfork} and considers the system to not have a working @code{vfork} if it detects any of them. Since this macro defines @code{vfork} only for backward compatibility with previous versions of @command{autoconf} you're encouraged to define it yourself in new code: @example @group #ifndef HAVE_WORKING_VFORK # define vfork fork #endif @end group @end example The results of this macro are cached in the @code{ac_cv_func_fork_works} and @code{ac_cv_func_vfork_works} variables. 
In order to override the test, you also need to set the @code{ac_cv_func_fork} and @code{ac_cv_func_vfork} variables. @end defmac @defmac AC_FUNC_FSEEKO @acindex{FUNC_FSEEKO} @cvindex _LARGEFILE_SOURCE @cvindex HAVE_FSEEKO @c @fuindex fseeko @prindex @code{fseeko} @c @fuindex ftello @prindex @code{ftello} @c @caindex sys_largefile_source If the @code{fseeko} function is available, define @code{HAVE_FSEEKO}. Define @code{_LARGEFILE_SOURCE} if necessary to make the prototype visible on some systems (e.g., glibc 2.2). Otherwise linkage problems may occur when compiling with @code{AC_SYS_LARGEFILE} on largefile-sensitive systems where @code{off_t} does not default to a 64-bit entity. All systems with @code{fseeko} also supply @code{ftello}. The Gnulib module @code{fseeko} invokes @code{AC_FUNC_FSEEKO} and also contains workarounds for other portability problems of @code{fseeko}. @xref{Gnulib}. @end defmac @defmac AC_FUNC_GETGROUPS @acindex{FUNC_GETGROUPS} @cvindex HAVE_GETGROUPS @ovindex GETGROUPS_LIBS @c @fuindex getgroups @prindex @code{getgroups} @caindex func_getgroups_works If the @code{getgroups} function is available and works (unlike on Ultrix 4.3 and NeXTstep 3.2, where @samp{getgroups (0, 0)} always fails), define @code{HAVE_GETGROUPS}. Set @code{GETGROUPS_LIBS} to any libraries needed to get that function. This macro runs @code{AC_TYPE_GETGROUPS}. This macro is obsolescent. New programs need not use this macro. But they may want to use the Gnulib module @code{getgroups}, which provides workarounds to other portability problems of this function. 
@end defmac @anchor{AC_FUNC_GETLOADAVG} @defmac AC_FUNC_GETLOADAVG @acindex{FUNC_GETLOADAVG} @cvindex SVR4 @cvindex DGUX @cvindex UMAX @cvindex UMAX4_3 @cvindex HAVE_NLIST_H @cvindex NLIST_NAME_UNION @cvindex GETLOADAVG_PRIVILEGED @cvindex NEED_SETGID @cvindex C_GETLOADAVG @ovindex LIBOBJS @ovindex NEED_SETGID @ovindex KMEM_GROUP @ovindex GETLOADAVG_LIBS @c @fuindex getloadavg @prindex @code{getloadavg} Check how to get the system load averages. To perform its tests properly, this macro needs the file @file{getloadavg.c}; therefore, be sure to set the @code{AC_LIBOBJ} replacement directory properly (see @ref{Generic Functions}, @code{AC_CONFIG_LIBOBJ_DIR}). If the system has the @code{getloadavg} function, define @code{HAVE_GETLOADAVG}, and set @code{GETLOADAVG_LIBS} to any libraries necessary to get that function. Also add @code{GETLOADAVG_LIBS} to @code{LIBS}. Otherwise, require an @code{AC_LIBOBJ} replacement for @samp{getloadavg} and possibly define several other C preprocessor macros and output variables: @enumerate @item Define @code{C_GETLOADAVG}. @item Define @code{SVR4}, @code{DGUX}, @code{UMAX}, or @code{UMAX4_3} if on those systems. @item @hdrindex{nlist.h} If @file{nlist.h} is found, define @code{HAVE_NLIST_H}. @item If @samp{struct nlist} has an @samp{n_un.n_name} member, define @code{HAVE_STRUCT_NLIST_N_UN_N_NAME}. The obsolete symbol @code{NLIST_NAME_UNION} is still defined, but do not depend upon it. @item Programs may need to be installed set-group-ID (or set-user-ID) for @code{getloadavg} to work. In this case, define @code{GETLOADAVG_PRIVILEGED}, set the output variable @code{NEED_SETGID} to @samp{true} (and otherwise to @samp{false}), and set @code{KMEM_GROUP} to the name of the group that should own the installed program. @end enumerate The @code{AC_FUNC_GETLOADAVG} macro is obsolescent. New programs should use Gnulib's @code{getloadavg} module. @xref{Gnulib}. 
@end defmac @anchor{AC_FUNC_GETMNTENT} @defmac AC_FUNC_GETMNTENT @acindex{FUNC_GETMNTENT} @cvindex HAVE_GETMNTENT @c @fuindex getmntent @prindex @code{getmntent} @caindex search_getmntent Check for @code{getmntent} in the standard C library, and then in the @file{sun}, @file{seq}, and @file{gen} libraries, for UNICOS, IRIX 4, PTX, and UnixWare, respectively. Then, if @code{getmntent} is available, define @code{HAVE_GETMNTENT} and set @code{ac_cv_func_getmntent} to @code{yes}. Otherwise set @code{ac_cv_func_getmntent} to @code{no}. The result of this macro can be overridden by setting the cache variable @code{ac_cv_search_getmntent}. The @code{AC_FUNC_GETMNTENT} macro is obsolescent. New programs should use Gnulib's @code{mountlist} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_GETPGRP @acindex{FUNC_GETPGRP} @cvindex GETPGRP_VOID @c @fuindex getpgid @c @fuindex getpgrp @prindex @code{getpgid} @prindex @code{getpgrp} @caindex func_getpgrp_void Define @code{GETPGRP_VOID} if it is an error to pass 0 to @code{getpgrp}; this is the Posix behavior. On older BSD systems, you must pass 0 to @code{getpgrp}, as it takes an argument and behaves like Posix's @code{getpgid}. @example #ifdef GETPGRP_VOID pid = getpgrp (); #else pid = getpgrp (0); #endif @end example This macro does not check whether @code{getpgrp} exists at all; if you need to work in that situation, first call @code{AC_CHECK_FUNC} for @code{getpgrp}. The result of this macro is cached in the @code{ac_cv_func_getpgrp_void} variable. This macro is obsolescent, as current systems have a @code{getpgrp} whose signature conforms to Posix. New programs need not use this macro. @end defmac @defmac AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK @acindex{FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK} @cvindex LSTAT_FOLLOWS_SLASHED_SYMLINK @c @fuindex lstat @prindex @code{lstat} @caindex func_lstat_dereferences_slashed_symlink If @file{link} is a symbolic link, then @code{lstat} should treat @file{link/} the same as @file{link/.}. 
However, many older @code{lstat} implementations incorrectly ignore trailing slashes. It is safe to assume that if @code{lstat} incorrectly ignores trailing slashes, then other symbolic-link-aware functions like @code{unlink} also incorrectly ignore trailing slashes. If @code{lstat} behaves properly, define @code{LSTAT_FOLLOWS_SLASHED_SYMLINK}, otherwise require an @code{AC_LIBOBJ} replacement of @code{lstat}. The result of this macro is cached in the @code{ac_cv_func_lstat_dereferences_slashed_symlink} variable. The @code{AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK} macro is obsolescent. New programs should use Gnulib's @code{lstat} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_MALLOC @acindex{FUNC_MALLOC} @cvindex HAVE_MALLOC @cvindex malloc @c @fuindex malloc @prindex @code{malloc} @caindex func_malloc_0_nonnull If the @code{malloc} function is compatible with the GNU C library @code{malloc} (i.e., @samp{malloc (0)} returns a valid pointer), define @code{HAVE_MALLOC} to 1. Otherwise define @code{HAVE_MALLOC} to 0, ask for an @code{AC_LIBOBJ} replacement for @samp{malloc}, and define @code{malloc} to @code{rpl_malloc} so that the native @code{malloc} is not used in the main project. Typically, the replacement file @file{malloc.c} should look like (note the @samp{#undef malloc}): @verbatim #include <config.h> #undef malloc #include <sys/types.h> void *malloc (); /* Allocate an N-byte block of memory from the heap. If N is zero, allocate a 1-byte block. */ void * rpl_malloc (size_t n) { if (n == 0) n = 1; return malloc (n); } @end verbatim The result of this macro is cached in the @code{ac_cv_func_malloc_0_nonnull} variable. If you don't want to maintain a @code{malloc.c} file in your package manually, you can instead use the Gnulib module @code{malloc-gnu}. 
@end defmac @defmac AC_FUNC_MBRTOWC @acindex{FUNC_MBRTOWC} @cvindex HAVE_MBRTOWC @c @fuindex mbrtowc @prindex @code{mbrtowc} @caindex func_mbrtowc Define @code{HAVE_MBRTOWC} to 1 if the function @code{mbrtowc} and the type @code{mbstate_t} are properly declared. The result of this macro is cached in the @code{ac_cv_func_mbrtowc} variable. The Gnulib module @code{mbrtowc} not only ensures that the function is declared, but also works around other portability problems of this function. @end defmac @defmac AC_FUNC_MEMCMP @acindex{FUNC_MEMCMP} @ovindex LIBOBJS @c @fuindex memcmp @prindex @code{memcmp} @caindex func_memcmp_working If the @code{memcmp} function is not available, or does not work on 8-bit data (like the one on SunOS 4.1.3), or fails when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary (such as the one on NeXT x86 OpenStep), require an @code{AC_LIBOBJ} replacement for @samp{memcmp}. The result of this macro is cached in the @code{ac_cv_func_memcmp_working} variable. This macro is obsolescent, as current systems have a working @code{memcmp}. New programs need not use this macro. @end defmac @defmac AC_FUNC_MKTIME @acindex{FUNC_MKTIME} @ovindex LIBOBJS @c @fuindex mktime @prindex @code{mktime} @caindex func_working_mktime If the @code{mktime} function is not available, or does not work correctly, require an @code{AC_LIBOBJ} replacement for @samp{mktime}. For the purposes of this test, @code{mktime} should conform to the Posix standard and should be the inverse of @code{localtime}. The result of this macro is cached in the @code{ac_cv_func_working_mktime} variable. The @code{AC_FUNC_MKTIME} macro is obsolescent. New programs should use Gnulib's @code{mktime} module. @xref{Gnulib}. 
@end defmac @anchor{AC_FUNC_MMAP} @defmac AC_FUNC_MMAP @acindex{FUNC_MMAP} @cvindex HAVE_MMAP @c @fuindex mmap @prindex @code{mmap} @caindex func_mmap_fixed_mapped If the @code{mmap} function exists and works correctly, define @code{HAVE_MMAP}. This checks only private fixed mapping of already-mapped memory. The result of this macro is cached in the @code{ac_cv_func_mmap_fixed_mapped} variable. Note: This macro asks for more than what an average program needs from @code{mmap}. In particular, the use of @code{MAP_FIXED} fails on HP-UX 11, whereas @code{mmap} otherwise works fine on this platform. @end defmac @defmac AC_FUNC_OBSTACK @acindex{FUNC_OBSTACK} @cvindex HAVE_OBSTACK @cindex obstack @caindex func_obstack If the obstacks are found, define @code{HAVE_OBSTACK}, else require an @code{AC_LIBOBJ} replacement for @samp{obstack}. The result of this macro is cached in the @code{ac_cv_func_obstack} variable. The @code{AC_FUNC_OBSTACK} macro is obsolescent. New programs should use Gnulib's @code{obstack} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_REALLOC @acindex{FUNC_REALLOC} @cvindex HAVE_REALLOC @cvindex realloc @c @fuindex realloc @prindex @code{realloc} @caindex func_realloc_0_nonnull If the @code{realloc} function is compatible with the GNU C library @code{realloc} (i.e., @samp{realloc (NULL, 0)} returns a valid pointer), define @code{HAVE_REALLOC} to 1. Otherwise define @code{HAVE_REALLOC} to 0, ask for an @code{AC_LIBOBJ} replacement for @samp{realloc}, and define @code{realloc} to @code{rpl_realloc} so that the native @code{realloc} is not used in the main project. See @code{AC_FUNC_MALLOC} for details. The result of this macro is cached in the @code{ac_cv_func_realloc_0_nonnull} variable. If you don't want to maintain a @code{realloc.c} file in your package manually, you can instead use the Gnulib module @code{realloc-gnu}. 
@end defmac @defmac AC_FUNC_SELECT_ARGTYPES @acindex{FUNC_SELECT_ARGTYPES} @cvindex SELECT_TYPE_ARG1 @cvindex SELECT_TYPE_ARG234 @cvindex SELECT_TYPE_ARG5 @c @fuindex select @prindex @code{select} @c @caindex func_select_args Determines the correct type to be passed for each of the @code{select} function's arguments, and defines those types in @code{SELECT_TYPE_ARG1}, @code{SELECT_TYPE_ARG234}, and @code{SELECT_TYPE_ARG5} respectively. @code{SELECT_TYPE_ARG1} defaults to @samp{int}, @code{SELECT_TYPE_ARG234} defaults to @samp{int *}, and @code{SELECT_TYPE_ARG5} defaults to @samp{struct timeval *}. This macro is obsolescent, as current systems have a @code{select} whose signature conforms to Posix. New programs need not use this macro. @end defmac @defmac AC_FUNC_SETPGRP @acindex{FUNC_SETPGRP} @cvindex SETPGRP_VOID @c @fuindex setpgrp @prindex @code{setpgrp} @caindex func_setpgrp_void If @code{setpgrp} takes no argument (the Posix version), define @code{SETPGRP_VOID}. Otherwise, it is the BSD version, which takes two process IDs as arguments. This macro does not check whether @code{setpgrp} exists at all; if you need to work in that situation, first call @code{AC_CHECK_FUNC} for @code{setpgrp}. This macro also does not check for the Solaris variant of @code{setpgrp}, which returns a @code{pid_t} instead of an @code{int}; portable code should only use the return value by comparing it against @code{-1} to check for errors. The result of this macro is cached in the @code{ac_cv_func_setpgrp_void} variable. This macro is obsolescent, as all forms of @code{setpgrp} are also obsolescent. New programs should use the Posix function @code{setpgid}, which takes two process IDs as arguments (like the BSD @code{setpgrp}). 
@end defmac @defmac AC_FUNC_STAT @defmacx AC_FUNC_LSTAT @acindex{FUNC_STAT} @acindex{FUNC_LSTAT} @cvindex HAVE_STAT_EMPTY_STRING_BUG @cvindex HAVE_LSTAT_EMPTY_STRING_BUG @c @fuindex stat @prindex @code{stat} @c @fuindex lstat @prindex @code{lstat} @caindex func_stat_empty_string_bug @caindex func_lstat_empty_string_bug Determine whether @code{stat} or @code{lstat} have the bug that it succeeds when given the zero-length file name as argument. The @code{stat} and @code{lstat} from SunOS 4.1.4 and the Hurd (as of 1998-11-01) do this. If it does, then define @code{HAVE_STAT_EMPTY_STRING_BUG} (or @code{HAVE_LSTAT_EMPTY_STRING_BUG}) and ask for an @code{AC_LIBOBJ} replacement of it. The results of these macros are cached in the @code{ac_cv_func_stat_empty_string_bug} and the @code{ac_cv_func_lstat_empty_string_bug} variables, respectively. These macros are obsolescent, as no current systems have the bug. New programs need not use these macros. @end defmac @anchor{AC_FUNC_STRCOLL} @defmac AC_FUNC_STRCOLL @acindex{FUNC_STRCOLL} @cvindex HAVE_STRCOLL @c @fuindex strcoll @prindex @code{strcoll} @caindex func_strcoll_works If the @code{strcoll} function exists and works correctly, define @code{HAVE_STRCOLL}. This does a bit more than @samp{AC_CHECK_FUNCS(strcoll)}, because some systems have incorrect definitions of @code{strcoll} that should not be used. But it does not check against a known bug of this function on Solaris 10. The result of this macro is cached in the @code{ac_cv_func_strcoll_works} variable. @end defmac @defmac AC_FUNC_STRERROR_R @acindex{FUNC_STRERROR_R} @cvindex HAVE_STRERROR_R @cvindex HAVE_DECL_STRERROR_R @cvindex STRERROR_R_CHAR_P @c @fuindex strerror_r @caindex func_strerror_r_char_p @prindex @code{strerror_r} If @code{strerror_r} is available, define @code{HAVE_STRERROR_R}, and if it is declared, define @code{HAVE_DECL_STRERROR_R}. If it returns a @code{char *} message, define @code{STRERROR_R_CHAR_P}; otherwise it returns an @code{int} error number. 
The Thread-Safe Functions option of Posix requires @code{strerror_r} to return @code{int}, but many systems (including, for example, version 2.2.4 of the GNU C Library) return a @code{char *} value that is not necessarily equal to the buffer argument. The result of this macro is cached in the @code{ac_cv_func_strerror_r_char_p} variable. The Gnulib module @code{strerror_r} not only ensures that the function has the return type specified by Posix, but also works around other portability problems of this function. @end defmac @anchor{AC_FUNC_STRFTIME} @defmac AC_FUNC_STRFTIME @acindex{FUNC_STRFTIME} @cvindex HAVE_STRFTIME @c @fuindex strftime @prindex @code{strftime} Check for @code{strftime} in the @file{intl} library, for SCO Unix. Then, if @code{strftime} is available, define @code{HAVE_STRFTIME}. This macro is obsolescent, as no current systems require the @file{intl} library for @code{strftime}. New programs need not use this macro. @end defmac @defmac AC_FUNC_STRTOD @acindex{FUNC_STRTOD} @ovindex POW_LIB @c @fuindex strtod @prindex @code{strtod} @caindex func_strtod @caindex func_pow If the @code{strtod} function does not exist or doesn't work correctly, ask for an @code{AC_LIBOBJ} replacement of @samp{strtod}. In this case, because @file{strtod.c} is likely to need @samp{pow}, set the output variable @code{POW_LIB} to the extra library needed. This macro caches its result in the @code{ac_cv_func_strtod} variable and depends upon the result in the @code{ac_cv_func_pow} variable. The @code{AC_FUNC_STRTOD} macro is obsolescent. New programs should use Gnulib's @code{strtod} module. @xref{Gnulib}. @end defmac @defmac AC_FUNC_STRTOLD @acindex{FUNC_STRTOLD} @cvindex HAVE_STRTOLD @prindex @code{strtold} @caindex func_strtold If the @code{strtold} function exists and conforms to C99 or later, define @code{HAVE_STRTOLD}. This macro caches its result in the @code{ac_cv_func_strtold} variable. 
The Gnulib module @code{strtold} not only ensures that the function exists, but also works around other portability problems of this function. @end defmac @defmac AC_FUNC_STRNLEN @acindex{FUNC_STRNLEN} @cvindex HAVE_STRNLEN @c @fuindex strnlen @prindex @code{strnlen} @caindex func_strnlen_working If the @code{strnlen} function is not available, or is buggy (like the one from AIX 4.3), require an @code{AC_LIBOBJ} replacement for it. This macro caches its result in the @code{ac_cv_func_strnlen_working} variable. The @code{AC_FUNC_STRNLEN} macro is obsolescent. New programs should use Gnulib's @code{strnlen} module. @xref{Gnulib}. @end defmac @anchor{AC_FUNC_UTIME_NULL} @defmac AC_FUNC_UTIME_NULL @acindex{FUNC_UTIME_NULL} @cvindex HAVE_UTIME_NULL @c @fuindex utime @prindex @code{utime} @caindex func_utime_null If @samp{utime (@var{file}, NULL)} sets @var{file}'s timestamp to the present, define @code{HAVE_UTIME_NULL}. This macro caches its result in the @code{ac_cv_func_utime_null} variable. This macro is obsolescent, as all current systems have a @code{utime} that behaves this way. New programs need not use this macro. @end defmac @anchor{AC_FUNC_VPRINTF} @defmac AC_FUNC_VPRINTF @acindex{FUNC_VPRINTF} @cvindex HAVE_VPRINTF @cvindex HAVE_DOPRNT @c @fuindex vprintf @prindex @code{vprintf} @c @fuindex vsprintf @prindex @code{vsprintf} If @code{vprintf} is found, define @code{HAVE_VPRINTF}. Otherwise, if @code{_doprnt} is found, define @code{HAVE_DOPRNT}. (If @code{vprintf} is available, you may assume that @code{vfprintf} and @code{vsprintf} are also available.) This macro is obsolescent, as all current systems have @code{vprintf}. New programs need not use this macro. @end defmac @defmac AC_REPLACE_FNMATCH @acindex{REPLACE_FNMATCH} @c @fuindex fnmatch @prindex @code{fnmatch} @hdrindex{fnmatch.h} @caindex func_fnmatch_works If the @code{fnmatch} function does not conform to Posix (see @code{AC_FUNC_FNMATCH}), ask for its @code{AC_LIBOBJ} replacement. 
The files @file{fnmatch.c}, @file{fnmatch_loop.c}, and @file{fnmatch_.h} in the @code{AC_LIBOBJ} replacement directory are assumed to contain a copy of the source code of GNU @code{fnmatch}. If necessary, this source code is compiled as an @code{AC_LIBOBJ} replacement, and the @file{fnmatch_.h} file is linked to @file{fnmatch.h} so that it can be included in place of the system @code{<fnmatch.h>}. This macro caches its result in the @code{ac_cv_func_fnmatch_works} variable. This macro is obsolescent, as it assumes the use of particular source files. New programs should use Gnulib's @code{fnmatch-posix} module, which provides this macro along with the source files. @xref{Gnulib}. @end defmac @node Generic Functions @subsection Generic Function Checks These macros are used to find functions not covered by the ``particular'' test macros. If the functions might be in libraries other than the default C library, first call @code{AC_CHECK_LIB} for those libraries. If you need to check the behavior of a function as well as find out whether it is present, you have to write your own test for it (@pxref{Writing Tests}). @anchor{AC_CHECK_FUNC} @defmac AC_CHECK_FUNC (@var{function}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{CHECK_FUNC} @caindex func_@var{function} If C function @var{function} is available, run shell commands @var{action-if-found}, otherwise @var{action-if-not-found}. If you just want to define a symbol if the function is available, consider using @code{AC_CHECK_FUNCS} instead. This macro checks for functions with C linkage even when @code{AC_LANG(C++)} has been called, since C is more standardized than C++. (@pxref{Language Choice}, for more information about selecting the language for checks.) This macro caches its result in the @code{ac_cv_func_@var{function}} variable. 
@end defmac @anchor{AC_CHECK_FUNCS} @defmac AC_CHECK_FUNCS (@var{function}@dots{}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{CHECK_FUNCS} @cvindex HAVE_@var{function} For each @var{function} enumerated in the blank-or-newline-separated argument list, define @code{HAVE_@var{function}} (in all capitals) if it is available. If @var{action-if-found} is given, it is additional shell code to execute when one of the functions is found. You can give it a value of @samp{break} to break out of the loop on the first match. If @var{action-if-not-found} is given, it is executed when one of the functions is not found. Results are cached for each @var{function} as in @code{AC_CHECK_FUNC}. @end defmac @defmac AC_CHECK_FUNCS_ONCE (@var{function}@dots{}) @acindex{CHECK_FUNCS_ONCE} @cvindex HAVE_@var{function} For each @var{function} enumerated in the blank-or-newline-separated argument list, define @code{HAVE_@var{function}} (in all capitals) if it is available. This is a once-only variant of @code{AC_CHECK_FUNCS}. It generates the checking code at most once, so that @command{configure} is smaller and faster; but the checks cannot be conditionalized and are always done once, early during the @command{configure} run. @end defmac @sp 1 Autoconf follows a philosophy that was formed over the years by those who have struggled for portability: isolate the portability issues in specific files, and then program as if you were in a Posix environment. Some functions may be missing or unfixable, and your package must be ready to replace them. Suitable replacements for many such problem functions are available from Gnulib (@pxref{Gnulib}). @defmac AC_LIBOBJ (@var{function}) @acindex{LIBOBJ} @ovindex LIBOBJS Specify that @samp{@var{function}.c} must be included in the executables to replace a missing or broken implementation of @var{function}. 
@vrindex ac_objext Technically, it adds @samp{@var{function}.$ac_objext} to the output variable @code{LIBOBJS} if it is not already in, and calls @code{AC_LIBSOURCE} for @samp{@var{function}.c}. You should not directly change @code{LIBOBJS}, since this is not traceable. @end defmac @defmac AC_LIBSOURCE (@var{file}) @acindex{LIBSOURCE} Specify that @var{file} might be needed to compile the project. If you need to know what files might be needed by a @file{configure.ac}, you should trace @code{AC_LIBSOURCE}. @var{file} must be a literal. This macro is called automatically from @code{AC_LIBOBJ}, but you must call it explicitly if you pass a shell variable to @code{AC_LIBOBJ}. In that case, since shell variables cannot be traced statically, you must pass to @code{AC_LIBSOURCE} any possible files that the shell variable might cause @code{AC_LIBOBJ} to need. For example, if you want to pass a variable @code{$foo_or_bar} to @code{AC_LIBOBJ} that holds either @code{"foo"} or @code{"bar"}, you should do: @example AC_LIBSOURCE([foo.c]) AC_LIBSOURCE([bar.c]) AC_LIBOBJ([$foo_or_bar]) @end example @noindent There is usually a way to avoid this, however, and you are encouraged to simply call @code{AC_LIBOBJ} with literal arguments. Note that this macro replaces the obsolete @code{AC_LIBOBJ_DECL}, with slightly different semantics: the old macro took the function name, e.g., @code{foo}, as its argument rather than the file name. @end defmac @defmac AC_LIBSOURCES (@var{files}) @acindex{LIBSOURCES} Like @code{AC_LIBSOURCE}, but accepts one or more @var{files} in a comma-separated M4 list. Thus, the above example might be rewritten: @example AC_LIBSOURCES([foo.c, bar.c]) AC_LIBOBJ([$foo_or_bar]) @end example @end defmac @defmac AC_CONFIG_LIBOBJ_DIR (@var{directory}) @acindex{CONFIG_LIBOBJ_DIR} Specify that @code{AC_LIBOBJ} replacement files are to be found in @var{directory}, a name relative to the top level of the source tree. 
The replacement directory defaults to @file{.}, the top level directory, and the most typical value is @file{lib}, corresponding to @samp{AC_CONFIG_LIBOBJ_DIR([lib])}. @command{configure} might need to know the replacement directory for the following reasons: (i) some checks use the replacement files, (ii) some macros bypass broken system headers by installing links to the replacement headers, and (iii) when used in conjunction with Automake, within each makefile, @var{directory} is used as a relative path from @code{$(top_srcdir)} to each object named in @code{LIBOBJS} and @code{LTLIBOBJS}, etc. @end defmac @sp 1 It is common to merely check for the existence of a function, and ask for its @code{AC_LIBOBJ} replacement if missing. The following macro is a convenient shorthand. @defmac AC_REPLACE_FUNCS (@var{function}@dots{}) @acindex{REPLACE_FUNCS} @cvindex HAVE_@var{function} @ovindex LIBOBJS Like @code{AC_CHECK_FUNCS}, but uses @samp{AC_LIBOBJ(@var{function})} as @var{action-if-not-found}. You can declare your replacement function by enclosing the prototype in @samp{#ifndef HAVE_@var{function}}. If the system has the function, it probably declares it in a header file you should be including, so you shouldn't redeclare it lest your declaration conflict. @end defmac @node Header Files @section Header Files @cindex Header, checking The following macros check for the presence of certain C header files. If there is no macro specifically defined to check for a header file you need, and you don't need to check for any special properties of it, then you can use one of the general header-file check macros. 
@menu * Header Portability:: Collected knowledge on common headers * Particular Headers:: Special handling to find certain headers * Generic Headers:: How to find other headers @end menu @node Header Portability @subsection Portability of Headers @cindex Portability of headers @cindex Header portability This section documents some collected knowledge about common headers, and the problems they cause. By definition, this list always requires additions. A much more complete list is maintained by the Gnulib project (@pxref{Gnulib}), covering @ref{Header File Substitutes, , Posix Headers, gnulib, Gnulib} and @ref{Glibc Header File Substitutes, , Glibc Headers, gnulib, Gnulib}. Please help us keep the Gnulib list as complete as possible. When we say that a header ``may require'' some set of other headers, we mean that it may be necessary for you to manually include those other headers first, or the contents of the header under test will fail to compile. When checking for these headers, you must provide the potentially-required headers in the @var{includes} argument to @code{AC_CHECK_HEADER} or @code{AC_CHECK_HEADERS}, or the check will fail spuriously. @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}) arranges to include a number of common requirements and should normally come first in your @var{includes}. For example, @file{net/if.h} may require @file{sys/types.h}, @file{sys/socket.h}, or both, and @code{AC_INCLUDES_DEFAULT} handles @file{sys/types.h} but not @file{sys/socket.h}, so you should check for it like this: @example AC_CHECK_HEADERS([sys/socket.h]) AC_CHECK_HEADERS([net/if.h], [], [], [AC_INCLUDES_DEFAULT[ #ifdef HAVE_SYS_SOCKET_H # include <sys/socket.h> #endif ]]) @end example Note that the example mixes single quoting (for @code{AC_INCLUDES_DEFAULT}, so that it gets expanded) and double quoting (to ensure that each preprocessor @code{#} gets treated as a literal string rather than a comment). 
@table @asis @item @file{limits.h} In C99 and later, @file{limits.h} defines @code{LLONG_MIN}, @code{LLONG_MAX}, and @code{ULLONG_MAX}, but many almost-C99 environments (e.g., default GCC 4.0.2 + glibc 2.4) do not define them. @item @file{memory.h} @hdrindex{memory.h} This header file is obsolete; use @file{string.h} instead. @item @file{strings.h} @hdrindex{strings.h} On some systems, this is the only header that declares @code{strcasecmp}, @code{strncasecmp}, and @code{ffs}. This header may or may not include @file{string.h} for you. However, on all recent systems it is safe to include both @file{string.h} and @file{strings.h}, in either order, in the same source file. @item @file{inttypes.h} vs.@: @file{stdint.h} @hdrindex{inttypes.h} @hdrindex{stdint.h} C99 specifies that @file{inttypes.h} includes @file{stdint.h}, so there's no need to include @file{stdint.h} separately in a standard environment. However, some implementations have @file{inttypes.h} but not @file{stdint.h} (e.g., Solaris 7), and some have @file{stdint.h} but not @file{inttypes.h} (e.g. MSVC 2012). Therefore, it is necessary to check for each and include each only if available. @item @file{linux/irda.h} @hdrindex{linux/irda.h} This header may require @file{linux/types.h} and/or @file{sys/socket.h}. @item @file{linux/random.h} @hdrindex{linux/random.h} This header may require @file{linux/types.h}. @item @file{net/if.h} @hdrindex{net/if.h} This header may require @file{sys/types.h} and/or @file{sys/socket.h}. @item @file{netinet/if_ether.h} @hdrindex{netinet/if_ether.h} This header may require some combination of @file{sys/types.h}, @file{sys/socket.h}, @file{netinet/in.h}, and @file{net/if.h}. @item @file{sys/mount.h} @hdrindex{sys/mount.h} This header may require @file{sys/params.h}. @item @file{sys/ptem.h} @hdrindex{sys/ptem.h} This header may require @file{sys/stream.h}. @item @file{sys/socket.h} @hdrindex{sys/socket.h} This header may require @file{sys/types.h}. 
@item @file{sys/ucred.h} @hdrindex{sys/ucred.h} This header may require @file{sys/types.h}. @item @file{X11/extensions/scrnsaver.h} @hdrindex{X11/extensions/scrnsaver.h} Using XFree86, this header requires @file{X11/Xlib.h}, which is probably so required that you might not even consider looking for it. @end table @node Particular Headers @subsection Particular Header Checks These macros check for particular system header files---whether they exist, and in some cases whether they declare certain symbols. @defmac AC_CHECK_HEADER_STDBOOL @acindex{CHECK_HEADER_STDBOOL} @cvindex HAVE__BOOL @hdrindex{stdbool.h} @caindex header_stdbool_h Check whether @file{stdbool.h} exists and conforms to C99 or later, and cache the result in the @code{ac_cv_header_stdbool_h} variable. If the type @code{_Bool} is defined, define @code{HAVE__BOOL} to 1. This macro is intended for use by Gnulib (@pxref{Gnulib}) and other packages that supply a substitute @file{stdbool.h} on platforms lacking a conforming one. The @code{AC_HEADER_STDBOOL} macro is better for code that explicitly checks for @file{stdbool.h}. @end defmac @defmac AC_HEADER_ASSERT @acindex{HEADER_ASSERT} @cvindex NDEBUG @hdrindex{assert.h} Check whether to enable assertions in the style of @file{assert.h}. Assertions are enabled by default, but the user can override this by invoking @command{configure} with the @option{--disable-assert} option. @end defmac @anchor{AC_HEADER_DIRENT} @defmac AC_HEADER_DIRENT @acindex{HEADER_DIRENT} @cvindex HAVE_DIRENT_H @cvindex HAVE_NDIR_H @cvindex HAVE_SYS_DIR_H @cvindex HAVE_SYS_NDIR_H @hdrindex{dirent.h} @hdrindex{sys/ndir.h} @hdrindex{sys/dir.h} @hdrindex{ndir.h} Check for the following header files. 
For the first one that is found and defines @samp{DIR}, define the listed C preprocessor macro: @multitable {@file{sys/ndir.h}} {@code{HAVE_SYS_NDIR_H}} @item @file{dirent.h} @tab @code{HAVE_DIRENT_H} @item @file{sys/ndir.h} @tab @code{HAVE_SYS_NDIR_H} @item @file{sys/dir.h} @tab @code{HAVE_SYS_DIR_H} @item @file{ndir.h} @tab @code{HAVE_NDIR_H} @end multitable The directory-library declarations in your source code should look something like the following: @example @group #include <sys/types.h> #ifdef HAVE_DIRENT_H # include <dirent.h> # define NAMLEN(dirent) strlen ((dirent)->d_name) #else # define dirent direct # define NAMLEN(dirent) ((dirent)->d_namlen) # ifdef HAVE_SYS_NDIR_H # include <sys/ndir.h> # endif # ifdef HAVE_SYS_DIR_H # include <sys/dir.h> # endif # ifdef HAVE_NDIR_H # include <ndir.h> # endif #endif @end group @end example Using the above declarations, the program would declare variables to be of type @code{struct dirent}, not @code{struct direct}, and would access the length of a directory entry name by passing a pointer to a @code{struct dirent} to the @code{NAMLEN} macro. This macro also checks for the SCO Xenix @file{dir} and @file{x} libraries. This macro is obsolescent, as all current systems with directory libraries have @code{<dirent.h>}. New programs need not use this macro. Also see @code{AC_STRUCT_DIRENT_D_INO} and @code{AC_STRUCT_DIRENT_D_TYPE} (@pxref{Particular Structures}). @end defmac @anchor{AC_HEADER_MAJOR} @defmac AC_HEADER_MAJOR @acindex{HEADER_MAJOR} @cvindex MAJOR_IN_MKDEV @cvindex MAJOR_IN_SYSMACROS @hdrindex{sys/mkdev.h} @hdrindex{sys/sysmacros.h} Detect the headers required to use @code{makedev}, @code{major}, and @code{minor}. These functions may be defined by @file{sys/mkdev.h}, @file{sys/sysmacros.h}, or @file{sys/types.h}. @code{AC_HEADER_MAJOR} defines @code{MAJOR_IN_MKDEV} if they are in @file{sys/mkdev.h}, or @code{MAJOR_IN_SYSMACROS} if they are in @file{sys/sysmacros.h}. 
If neither macro is defined, they are either in @file{sys/types.h} or unavailable. To properly use these functions, your code should contain something like: @verbatim #include <sys/types.h> #ifdef MAJOR_IN_MKDEV # include <sys/mkdev.h> #elif defined MAJOR_IN_SYSMACROS # include <sys/sysmacros.h> #endif @end verbatim Note: Configure scripts built with Autoconf 2.69 or earlier will not detect a problem if @file{sys/types.h} contains definitions of @code{major}, @code{minor}, and/or @code{makedev} that trigger compiler warnings upon use. This is known to occur with GNU libc 2.25, where those definitions are being deprecated to reduce namespace pollution. If it is not practical to use Autoconf 2.70 to regenerate the configure script of affected software, you can work around the problem by setting @samp{ac_cv_header_sys_types_h_makedev=no}, as an argument to @command{configure} or as part of a @file{config.site} site default file (@pxref{Site Defaults}). @end defmac @defmac AC_HEADER_RESOLV @acindex{HEADER_RESOLV} @cvindex HAVE_RESOLV_H @hdrindex{resolv.h} Checks for header @file{resolv.h}, checking for prerequisites first. To properly use @file{resolv.h}, your code should contain something like the following: @verbatim #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_NETINET_IN_H # include <netinet/in.h> /* inet_ functions / structs */ #endif #ifdef HAVE_ARPA_NAMESER_H # include <arpa/nameser.h> /* DNS HEADER struct */ #endif #ifdef HAVE_NETDB_H # include <netdb.h> #endif #include <resolv.h> @end verbatim @end defmac @anchor{AC_HEADER_STAT} @defmac AC_HEADER_STAT @acindex{HEADER_STAT} @cvindex STAT_MACROS_BROKEN @hdrindex{sys/stat.h} If the macros @code{S_ISDIR}, @code{S_ISREG}, etc.@: defined in @file{sys/stat.h} do not work properly (returning false positives), define @code{STAT_MACROS_BROKEN}. This is the case on Tektronix UTekV, Amdahl UTS and Motorola System V/88. This macro is obsolescent, as no current systems have the bug. 
New programs need not use this macro. @end defmac @defmac AC_HEADER_STDBOOL @acindex{HEADER_STDBOOL} @cvindex HAVE_STDBOOL_H @cvindex HAVE__BOOL @hdrindex{stdbool.h} @caindex header_stdbool_h If @file{stdbool.h} exists and conforms to C99 or later, define @code{HAVE_STDBOOL_H} to 1; if the type @code{_Bool} is defined, define @code{HAVE__BOOL} to 1. To fulfill the standard's requirements, your program could contain the following code: @example @group #ifdef HAVE_STDBOOL_H # include <stdbool.h> #else # ifndef HAVE__BOOL # ifdef __cplusplus typedef bool _Bool; # else # define _Bool signed char # endif # endif # define bool _Bool # define false 0 # define true 1 # define __bool_true_false_are_defined 1 #endif @end group @end example Alternatively you can use the @samp{stdbool} package of Gnulib (@pxref{Gnulib}). It simplifies your code so that it can say just @code{#include <stdbool.h>}, and it adds support for less-common platforms. This macro caches its result in the @code{ac_cv_header_stdbool_h} variable. This macro differs from @code{AC_CHECK_HEADER_STDBOOL} only in that it defines @code{HAVE_STDBOOL_H} whereas @code{AC_CHECK_HEADER_STDBOOL} does not. @end defmac @anchor{AC_HEADER_STDC} @defmac AC_HEADER_STDC @acindex{HEADER_STDC} @cvindex STDC_HEADERS @caindex header_stdc This macro is obsolescent. Its sole effect is to make sure that all the headers that are included by @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), but not part of ISO C90, have been checked for. All hosted environments that are still of interest for portable code provide all of the headers specified in ISO C90 (as amended in 1995). @end defmac @defmac AC_HEADER_SYS_WAIT @acindex{HEADER_SYS_WAIT} @cvindex HAVE_SYS_WAIT_H @hdrindex{sys/wait.h} @caindex header_sys_wait_h If @file{sys/wait.h} exists and is compatible with Posix, define @code{HAVE_SYS_WAIT_H}. 
Incompatibility can occur if @file{sys/wait.h} does not exist, or if it uses the old BSD @code{union wait} instead of @code{int} to store a status value. If @file{sys/wait.h} is not Posix compatible, then instead of including it, define the Posix macros with their usual interpretations. Here is an example: @example @group #include <sys/types.h> #ifdef HAVE_SYS_WAIT_H # include <sys/wait.h> #endif #ifndef WEXITSTATUS # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) #endif #ifndef WIFEXITED # define WIFEXITED(stat_val) (((stat_val) & 255) == 0) #endif @end group @end example @noindent This macro caches its result in the @code{ac_cv_header_sys_wait_h} variable. This macro is obsolescent, as current systems are compatible with Posix. New programs need not use this macro. @end defmac @cvindex _POSIX_VERSION @hdrindex{unistd.h} @code{_POSIX_VERSION} is defined when @file{unistd.h} is included on Posix systems. If there is no @file{unistd.h}, it is definitely not a Posix system. However, some non-Posix systems do have @file{unistd.h}. The way to check whether the system supports Posix is: @example @group #ifdef HAVE_UNISTD_H # include <sys/types.h> # include <unistd.h> #endif #ifdef _POSIX_VERSION /* Code for Posix systems. */ #endif @end group @end example @defmac AC_HEADER_TIOCGWINSZ @acindex{HEADER_TIOCGWINSZ} @cvindex GWINSZ_IN_SYS_IOCTL @hdrindex{sys/ioctl.h} @hdrindex{termios.h} @c FIXME: I need clarifications from Jim. If the use of @code{TIOCGWINSZ} requires @file{<sys/ioctl.h>}, then define @code{GWINSZ_IN_SYS_IOCTL}. Otherwise @code{TIOCGWINSZ} can be found in @file{<termios.h>}. Use: @example @group #ifdef HAVE_TERMIOS_H # include <termios.h> #endif #ifdef GWINSZ_IN_SYS_IOCTL # include <sys/ioctl.h> #endif @end group @end example @end defmac @node Generic Headers @subsection Generic Header Checks These macros are used to find system header files not covered by the ``particular'' test macros. 
If you need to check the contents of a header as well as find out whether it is present, you have to write your own test for it (@pxref{Writing Tests}). @anchor{AC_CHECK_HEADER} @defmac AC_CHECK_HEADER (@var{header-file}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @ovar{includes}) @acindex{CHECK_HEADER} @caindex header_@var{header-file} If the system header file @var{header-file} is compilable, execute shell commands @var{action-if-found}, otherwise execute @var{action-if-not-found}. If you just want to define a symbol if the header file is available, consider using @code{AC_CHECK_HEADERS} instead. @var{includes} should be the appropriate @dfn{prerequisite} code, i.e.@: whatever might be required to appear above @samp{#include <@var{header-file}>} for it to compile without error. This can be anything, but will normally be additional @samp{#include} directives. If @var{includes} is omitted or empty, @file{configure} will use the contents of the macro @code{AC_INCLUDES_DEFAULT}. @xref{Default Includes}. This macro used to check only for the @emph{presence} of a header, not whether its contents were acceptable to the compiler. Some older @command{configure} scripts rely on this behavior, so it is still available by specifying @samp{-} as @var{includes}. This mechanism is deprecated as of Autoconf 2.70; situations where a preprocessor-only check is required should use @code{AC_PREPROC_IFELSE}. @xref{Running the Preprocessor}. This macro caches its result in the @code{ac_cv_header_@var{header-file}} variable, with characters not suitable for a variable name mapped to underscores. 
@end defmac @anchor{AC_CHECK_HEADERS} @defmac AC_CHECK_HEADERS (@var{header-file}@dots{}, @ @ovar{action-if-found}, @ovar{action-if-not-found}, @ @ovar{includes}) @acindex{CHECK_HEADERS} @cvindex HAVE_@var{header} @caindex header_@var{header-file} For each given system header file @var{header-file} in the blank-separated argument list that exists, define @code{HAVE_@var{header-file}} (in all capitals). If @var{action-if-found} is given, it is additional shell code to execute when one of the header files is found. You can give it a value of @samp{break} to break out of the loop on the first match. If @var{action-if-not-found} is given, it is executed when one of the header files is not found. @var{includes} is interpreted as in @code{AC_CHECK_HEADER}, in order to choose the set of preprocessor directives supplied before the header under test. This macro caches its result in the @code{ac_cv_header_@var{header-file}} variable, with characters not suitable for a variable name mapped to underscores. @end defmac @defmac AC_CHECK_HEADERS_ONCE (@var{header-file}@dots{}) @acindex{CHECK_HEADERS_ONCE} @cvindex HAVE_@var{header} For each given system header file @var{header-file} in the blank-separated argument list that exists, define @code{HAVE_@var{header-file}} (in all capitals). If you do not need the full power of @code{AC_CHECK_HEADERS}, this variant generates smaller, faster @command{configure} files. All headers passed to @code{AC_CHECK_HEADERS_ONCE} are checked for in one pass, early during the @command{configure} run. The checks cannot be conditionalized, you cannot specify an @var{action-if-found} or @var{action-if-not-found}, and @code{AC_INCLUDES_DEFAULT} is always used for the prerequisites. @end defmac In previous versions of Autoconf, these macros merely checked whether the header was accepted by the preprocessor. This was changed because the old test was inappropriate for typical uses. 
Headers are typically used to compile, not merely to preprocess, and the old behavior sometimes accepted headers that clashed at compile-time (@pxref{Present But Cannot Be Compiled}). If for some reason it is inappropriate to check whether a header is compilable, you should use @code{AC_PREPROC_IFELSE} (@pxref{Running the Preprocessor}) instead of these macros. Requiring each header to compile improves the robustness of the test, but it also requires you to make sure that the @var{includes} are correct. Most system headers nowadays make sure to @code{#include} whatever they require, or else have their dependencies satisfied by @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), but @pxref{Header Portability}, for known exceptions. In general, if you are looking for @file{bar.h}, which requires that @file{foo.h} be included first if it exists, you should do something like this: @example AC_CHECK_HEADERS([foo.h]) AC_CHECK_HEADERS([bar.h], [], [], [#ifdef HAVE_FOO_H # include <foo.h> #endif ]) @end example @node Declarations @section Declarations @cindex Declaration, checking The following macros check for the declaration of variables and functions. If there is no macro specifically defined to check for a symbol you need, then you can use the general macros (@pxref{Generic Declarations}) or, for more complex tests, you may use @code{AC_COMPILE_IFELSE} (@pxref{Running the Compiler}). @menu * Particular Declarations:: Macros to check for certain declarations * Generic Declarations:: How to find other declarations @end menu @node Particular Declarations @subsection Particular Declaration Checks There are no specific macros for declarations. @node Generic Declarations @subsection Generic Declaration Checks These macros are used to find declarations not covered by the ``particular'' test macros. 
@defmac AC_CHECK_DECL (@var{symbol}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_DECL} @caindex have_decl_@var{symbol} If @var{symbol} (a function, variable, or constant) is not declared in @var{includes} and a declaration is needed, run the shell commands @var{action-if-not-found}, otherwise @var{action-if-found}. @var{includes} is a series of include directives, defaulting to @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), which are used prior to the declaration under test. This macro actually tests whether @var{symbol} is defined as a macro or can be used as an r-value, not whether it is really declared, because it is much safer to avoid introducing extra declarations when they are not needed. In order to facilitate use of C++ and overloaded function declarations, it is possible to specify function argument types in parentheses for types which can be zero-initialized: @example AC_CHECK_DECL([basename(char *)]) @end example This macro caches its result in the @code{ac_cv_have_decl_@var{symbol}} variable, with characters not suitable for a variable name mapped to underscores. @end defmac @anchor{AC_CHECK_DECLS} @defmac AC_CHECK_DECLS (@var{symbols}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_DECLS} @cvindex HAVE_DECL_@var{symbol} @caindex have_decl_@var{symbol} For each of the @var{symbols} (@emph{comma}-separated list with optional function argument types for C++ overloads), define @code{HAVE_DECL_@var{symbol}} (in all capitals) to @samp{1} if @var{symbol} is declared, otherwise to @samp{0}. If @var{action-if-not-found} is given, it is additional shell code to execute when one of the function declarations is needed, otherwise @var{action-if-found} is executed. @var{includes} is a series of include directives, defaulting to @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), which are used prior to the declarations under test. 
This macro uses an M4 list as first argument: @example AC_CHECK_DECLS([strdup]) AC_CHECK_DECLS([strlen]) AC_CHECK_DECLS([malloc, realloc, calloc, free]) AC_CHECK_DECLS([j0], [], [], [[#include <math.h>]]) AC_CHECK_DECLS([[basename(char *)], [dirname(char *)]]) @end example Unlike the other @samp{AC_CHECK_*S} macros, when a @var{symbol} is not declared, @code{HAVE_DECL_@var{symbol}} is defined to @samp{0} instead of leaving @code{HAVE_DECL_@var{symbol}} undeclared. When you are @emph{sure} that the check was performed, use @code{HAVE_DECL_@var{symbol}} in @code{#if}: @example #if !HAVE_DECL_SYMBOL extern char *symbol; #endif @end example @noindent If the test may have not been performed, however, because it is safer @emph{not} to declare a symbol than to use a declaration that conflicts with the system's one, you should use: @example #if defined HAVE_DECL_MALLOC && !HAVE_DECL_MALLOC void *malloc (size_t *s); #endif @end example @noindent You fall into the second category only in extreme situations: either your files may be used without being configured, or they are used during the configuration. In most cases the traditional approach is enough. This macro caches its results in @code{ac_cv_have_decl_@var{symbol}} variables, with characters not suitable for a variable name mapped to underscores. @end defmac @defmac AC_CHECK_DECLS_ONCE (@var{symbols}) @acindex{CHECK_DECLS_ONCE} @cvindex HAVE_DECL_@var{symbol} For each of the @var{symbols} (@emph{comma}-separated list), define @code{HAVE_DECL_@var{symbol}} (in all capitals) to @samp{1} if @var{symbol} is declared in the default include files, otherwise to @samp{0}. This is a once-only variant of @code{AC_CHECK_DECLS}. It generates the checking code at most once, so that @command{configure} is smaller and faster; but the checks cannot be conditionalized and are always done once, early during the @command{configure} run. 
@end defmac @node Structures @section Structures @cindex Structure, checking The following macros check for the presence of certain members in C structures. If there is no macro specifically defined to check for a member you need, then you can use the general structure-member macros (@pxref{Generic Structures}) or, for more complex tests, you may use @code{AC_COMPILE_IFELSE} (@pxref{Running the Compiler}). @menu * Particular Structures:: Macros to check for certain structure members * Generic Structures:: How to find other structure members @end menu @node Particular Structures @subsection Particular Structure Checks The following macros check for certain structures or structure members. @defmac AC_STRUCT_DIRENT_D_INO @acindex{STRUCT_DIRENT_D_INO} @cvindex HAVE_STRUCT_DIRENT_D_INO @c @caindex header_dirent_dirent_h @c @caindex member_struct_dirent_d_ino Perform all the actions of @code{AC_HEADER_DIRENT} (@pxref{Particular Headers}). Then, if @code{struct dirent} contains a @code{d_ino} member, define @code{HAVE_STRUCT_DIRENT_D_INO}. @code{HAVE_STRUCT_DIRENT_D_INO} indicates only the presence of @code{d_ino}, not whether its contents are always reliable. Traditionally, a zero @code{d_ino} indicated a deleted directory entry, though current systems hide this detail from the user and never return zero @code{d_ino} values. Many current systems report an incorrect @code{d_ino} for a directory entry that is a mount point. @end defmac @defmac AC_STRUCT_DIRENT_D_TYPE @acindex{STRUCT_DIRENT_D_TYPE} @cvindex HAVE_STRUCT_DIRENT_D_TYPE @c @caindex header_dirent_dirent_h @c @caindex member_struct_dirent_d_type Perform all the actions of @code{AC_HEADER_DIRENT} (@pxref{Particular Headers}). Then, if @code{struct dirent} contains a @code{d_type} member, define @code{HAVE_STRUCT_DIRENT_D_TYPE}. 
@end defmac @anchor{AC_STRUCT_ST_BLOCKS} @defmac AC_STRUCT_ST_BLOCKS @acindex{STRUCT_ST_BLOCKS} @cvindex HAVE_STRUCT_STAT_ST_BLOCKS @cvindex HAVE_ST_BLOCKS @ovindex LIBOBJS @caindex member_struct_stat_st_blocks If @code{struct stat} contains an @code{st_blocks} member, define @code{HAVE_STRUCT_STAT_ST_BLOCKS}. Otherwise, require an @code{AC_LIBOBJ} replacement of @samp{fileblocks}. The former name, @code{HAVE_ST_BLOCKS} is to be avoided, as its support will cease in the future. This macro caches its result in the @code{ac_cv_member_struct_stat_st_blocks} variable. @end defmac @defmac AC_STRUCT_TM @acindex{STRUCT_TM} @cvindex TM_IN_SYS_TIME @hdrindex{time.h} @hdrindex{sys/time.h} If @file{time.h} does not define @code{struct tm}, define @code{TM_IN_SYS_TIME}, which means that including @file{sys/time.h} had better define @code{struct tm}. This macro is obsolescent, as @file{time.h} defines @code{struct tm} in current systems. New programs need not use this macro. @end defmac @anchor{AC_STRUCT_TIMEZONE} @defmac AC_STRUCT_TIMEZONE @acindex{STRUCT_TIMEZONE} @cvindex HAVE_DECL_TZNAME @cvindex HAVE_STRUCT_TM_TM_ZONE @cvindex HAVE_TM_ZONE @cvindex HAVE_TZNAME @c @caindex member_struct_tm_tm_zone @c @caindex struct_tm Figure out how to get the current timezone. If @code{struct tm} has a @code{tm_zone} member, define @code{HAVE_STRUCT_TM_TM_ZONE} (and the obsoleted @code{HAVE_TM_ZONE}). Otherwise, if the external array @code{tzname} is found, define @code{HAVE_TZNAME}; if it is declared, define @code{HAVE_DECL_TZNAME}. @end defmac @node Generic Structures @subsection Generic Structure Checks These macros are used to find structure members not covered by the ``particular'' test macros. @defmac AC_CHECK_MEMBER (@var{aggregate}.@var{member}, @ @ovar{action-if-found}, @ovar{action-if-not-found}, @ @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_MEMBER} @caindex member_@var{aggregate}_@var{member} Check whether @var{member} is a member of the aggregate @var{aggregate}. 
If no @var{includes} are specified, the default includes are used (@pxref{Default Includes}). @example AC_CHECK_MEMBER([struct passwd.pw_gecos], [], [AC_MSG_ERROR([we need 'passwd.pw_gecos'])], [[#include <pwd.h>]]) @end example You can use this macro for submembers: @example AC_CHECK_MEMBER(struct top.middle.bot) @end example This macro caches its result in the @code{ac_cv_member_@var{aggregate}_@var{member}} variable, with characters not suitable for a variable name mapped to underscores. @end defmac @anchor{AC_CHECK_MEMBERS} @defmac AC_CHECK_MEMBERS (@var{members}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_MEMBERS} @cvindex HAVE_@var{aggregate}_@var{member} Check for the existence of each @samp{@var{aggregate}.@var{member}} of @var{members} using the previous macro. When @var{member} belongs to @var{aggregate}, define @code{HAVE_@var{aggregate}_@var{member}} (in all capitals, with spaces and dots replaced by underscores). If @var{action-if-found} is given, it is executed for each of the found members. If @var{action-if-not-found} is given, it is executed for each of the members that could not be found. @var{includes} is a series of include directives, defaulting to @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), which are used prior to the members under test. This macro uses M4 lists: @example AC_CHECK_MEMBERS([struct stat.st_rdev, struct stat.st_blksize]) @end example @end defmac @node Types @section Types @cindex Types @cindex C types The following macros check for C types, either builtin or typedefs. If there is no macro specifically defined to check for a type you need, and you don't need to check for any special properties of it, then you can use a general type-check macro. 
@menu * Particular Types:: Special handling to find certain types * Generic Types:: How to find other types @end menu @node Particular Types @subsection Particular Type Checks @hdrindex{sys/types.h} @hdrindex{stdlib.h} @hdrindex{stdint.h} @hdrindex{inttypes.h} These macros check for particular C types in @file{sys/types.h}, @file{stdlib.h}, @file{stdint.h}, @file{inttypes.h} and others, if they exist. The Gnulib @code{stdint} module is an alternate way to define many of these symbols; it is useful if you prefer your code to assume a C99-or-better environment. @xref{Gnulib}. @anchor{AC_TYPE_GETGROUPS} @defmac AC_TYPE_GETGROUPS @acindex{TYPE_GETGROUPS} @cvindex GETGROUPS_T @caindex type_getgroups Define @code{GETGROUPS_T} to be whichever of @code{gid_t} or @code{int} is the base type of the array argument to @code{getgroups}. This macro caches the base type in the @code{ac_cv_type_getgroups} variable. @end defmac @defmac AC_TYPE_INT8_T @acindex{TYPE_INT8_T} @cvindex HAVE_INT8_T @cvindex int8_t @caindex c_int8_t If @file{stdint.h} or @file{inttypes.h} does not define the type @code{int8_t}, define @code{int8_t} to a signed integer type that is exactly 8 bits wide and that uses two's complement representation, if such a type exists. If you are worried about porting to hosts that lack such a type, you can use the results of this macro in C89-or-later code as follows: @example #if HAVE_STDINT_H # include <stdint.h> #endif #if defined INT8_MAX || defined int8_t @emph{code using int8_t} #else @emph{complicated alternative using >8-bit 'signed char'} #endif @end example This macro caches the type in the @code{ac_cv_c_int8_t} variable. @end defmac @defmac AC_TYPE_INT16_T @acindex{TYPE_INT16_T} @cvindex HAVE_INT16_T @cvindex int16_t @caindex c_int16_t This is like @code{AC_TYPE_INT8_T}, except for 16-bit integers. 
@end defmac @defmac AC_TYPE_INT32_T @acindex{TYPE_INT32_T} @cvindex HAVE_INT32_T @cvindex int32_t @caindex c_int32_t This is like @code{AC_TYPE_INT8_T}, except for 32-bit integers. @end defmac @defmac AC_TYPE_INT64_T @acindex{TYPE_INT64_T} @cvindex HAVE_INT64_T @cvindex int64_t @caindex c_int64_t This is like @code{AC_TYPE_INT8_T}, except for 64-bit integers. @end defmac @defmac AC_TYPE_INTMAX_T @acindex{TYPE_INTMAX_T} @cvindex HAVE_INTMAX_T @cvindex intmax_t @c @caindex type_intmax_t If @file{stdint.h} or @file{inttypes.h} defines the type @code{intmax_t}, define @code{HAVE_INTMAX_T}. Otherwise, define @code{intmax_t} to the widest signed integer type. @end defmac @defmac AC_TYPE_INTPTR_T @acindex{TYPE_INTPTR_T} @cvindex HAVE_INTPTR_T @cvindex intptr_t @c @caindex type_intptr_t If @file{stdint.h} or @file{inttypes.h} defines the type @code{intptr_t}, define @code{HAVE_INTPTR_T}. Otherwise, define @code{intptr_t} to a signed integer type wide enough to hold a pointer, if such a type exists. @end defmac @defmac AC_TYPE_LONG_DOUBLE @acindex{TYPE_LONG_DOUBLE} @cvindex HAVE_LONG_DOUBLE @caindex type_long_double If the C compiler supports a working @code{long double} type, define @code{HAVE_LONG_DOUBLE}. The @code{long double} type might have the same range and precision as @code{double}. This macro caches its result in the @code{ac_cv_type_long_double} variable. This macro is obsolescent, as current C compilers support @code{long double}. New programs need not use this macro. @end defmac @defmac AC_TYPE_LONG_DOUBLE_WIDER @acindex{TYPE_LONG_DOUBLE_WIDER} @cvindex HAVE_LONG_DOUBLE_WIDER @caindex type_long_double_wider If the C compiler supports a working @code{long double} type with more range or precision than the @code{double} type, define @code{HAVE_LONG_DOUBLE_WIDER}. This macro caches its result in the @code{ac_cv_type_long_double_wider} variable. 
@end defmac @defmac AC_TYPE_LONG_LONG_INT @acindex{TYPE_LONG_LONG_INT} @cvindex HAVE_LONG_LONG_INT @caindex type_long_long_int If the C compiler supports a working @code{long long int} type, define @code{HAVE_LONG_LONG_INT}. However, this test does not test @code{long long int} values in preprocessor @code{#if} expressions, because too many compilers mishandle such expressions. @xref{Preprocessor Arithmetic}. This macro caches its result in the @code{ac_cv_type_long_long_int} variable. @end defmac @defmac AC_TYPE_MBSTATE_T @acindex{TYPE_MBSTATE_T} @cvindex mbstate_t @hdrindex{wchar.h} @caindex type_mbstate_t Define @code{HAVE_MBSTATE_T} if @code{<wchar.h>} declares the @code{mbstate_t} type. Also, define @code{mbstate_t} to be a type if @code{<wchar.h>} does not declare it. This macro caches its result in the @code{ac_cv_type_mbstate_t} variable. @end defmac @anchor{AC_TYPE_MODE_T} @defmac AC_TYPE_MODE_T @acindex{TYPE_MODE_T} @cvindex mode_t @caindex type_mode_t Define @code{mode_t} to a suitable type, if standard headers do not define it. This macro caches its result in the @code{ac_cv_type_mode_t} variable. @end defmac @anchor{AC_TYPE_OFF_T} @defmac AC_TYPE_OFF_T @acindex{TYPE_OFF_T} @cvindex off_t @caindex type_off_t Define @code{off_t} to a suitable type, if standard headers do not define it. This macro caches its result in the @code{ac_cv_type_off_t} variable. @end defmac @anchor{AC_TYPE_PID_T} @defmac AC_TYPE_PID_T @acindex{TYPE_PID_T} @cvindex pid_t @caindex type_pid_t Define @code{pid_t} to a suitable type, if standard headers do not define it. This macro caches its result in the @code{ac_cv_type_pid_t} variable. @end defmac @anchor{AC_TYPE_SIZE_T} @defmac AC_TYPE_SIZE_T @acindex{TYPE_SIZE_T} @cvindex size_t @caindex type_size_t Define @code{size_t} to a suitable type, if standard headers do not define it. This macro caches its result in the @code{ac_cv_type_size_t} variable. 
@end defmac @defmac AC_TYPE_SSIZE_T @acindex{TYPE_SSIZE_T} @cvindex ssize_t @caindex type_ssize_t Define @code{ssize_t} to a suitable type, if standard headers do not define it. This macro caches its result in the @code{ac_cv_type_ssize_t} variable. @end defmac @anchor{AC_TYPE_UID_T} @defmac AC_TYPE_UID_T @acindex{TYPE_UID_T} @cvindex uid_t @cvindex gid_t @caindex type_uid_t Define @code{uid_t} and @code{gid_t} to suitable types, if standard headers do not define them. This macro caches its result in the @code{ac_cv_type_uid_t} variable. @end defmac @defmac AC_TYPE_UINT8_T @acindex{TYPE_UINT8_T} @cvindex HAVE_UINT8_T @cvindex uint8_t @caindex c_uint8_t If @file{stdint.h} or @file{inttypes.h} does not define the type @code{uint8_t}, define @code{uint8_t} to an unsigned integer type that is exactly 8 bits wide, if such a type exists. This is like @code{AC_TYPE_INT8_T}, except for unsigned integers. @end defmac @defmac AC_TYPE_UINT16_T @acindex{TYPE_UINT16_T} @cvindex HAVE_UINT16_T @cvindex uint16_t @caindex c_uint16_t This is like @code{AC_TYPE_UINT8_T}, except for 16-bit integers. @end defmac @defmac AC_TYPE_UINT32_T @acindex{TYPE_UINT32_T} @cvindex HAVE_UINT32_T @cvindex uint32_t @caindex c_uint32_t This is like @code{AC_TYPE_UINT8_T}, except for 32-bit integers. @end defmac @defmac AC_TYPE_UINT64_T @acindex{TYPE_UINT64_T} @cvindex HAVE_UINT64_T @cvindex uint64_t @caindex c_uint64_t This is like @code{AC_TYPE_UINT8_T}, except for 64-bit integers. @end defmac @defmac AC_TYPE_UINTMAX_T @acindex{TYPE_UINTMAX_T} @cvindex HAVE_UINTMAX_T @cvindex uintmax_t @c @caindex type_uintmax_t If @file{stdint.h} or @file{inttypes.h} defines the type @code{uintmax_t}, define @code{HAVE_UINTMAX_T}. Otherwise, define @code{uintmax_t} to the widest unsigned integer type. 
@end defmac @defmac AC_TYPE_UINTPTR_T @acindex{TYPE_UINTPTR_T} @cvindex HAVE_UINTPTR_T @cvindex uintptr_t @c @caindex type_uintptr_t If @file{stdint.h} or @file{inttypes.h} defines the type @code{uintptr_t}, define @code{HAVE_UINTPTR_T}. Otherwise, define @code{uintptr_t} to an unsigned integer type wide enough to hold a pointer, if such a type exists. @end defmac @defmac AC_TYPE_UNSIGNED_LONG_LONG_INT @acindex{TYPE_UNSIGNED_LONG_LONG_INT} @cvindex HAVE_UNSIGNED_LONG_LONG_INT @caindex type_unsigned_long_long_int If the C compiler supports a working @code{unsigned long long int} type, define @code{HAVE_UNSIGNED_LONG_LONG_INT}. However, this test does not test @code{unsigned long long int} values in preprocessor @code{#if} expressions, because too many compilers mishandle such expressions. @xref{Preprocessor Arithmetic}. This macro caches its result in the @code{ac_cv_type_unsigned_long_long_int} variable. @end defmac @node Generic Types @subsection Generic Type Checks These macros are used to check for types not covered by the ``particular'' test macros. @defmac AC_CHECK_TYPE (@var{type}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_TYPE} @caindex type_@var{type} Check whether @var{type} is defined. It may be a compiler builtin type or defined by the @var{includes}. @var{includes} is a series of include directives, defaulting to @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), which are used prior to the type under test. In C, @var{type} must be a type-name, so that the expression @samp{sizeof (@var{type})} is valid (but @samp{sizeof ((@var{type}))} is not). The same test is applied when compiling for C++, which means that in C++ @var{type} should be a type-id and should not be an anonymous @samp{struct} or @samp{union}. 
This macro caches its result in the @code{ac_cv_type_@var{type}} variable, with @samp{*} mapped to @samp{p} and other characters not suitable for a variable name mapped to underscores. @end defmac @defmac AC_CHECK_TYPES (@var{types}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_TYPES} @cvindex HAVE_@var{type} For each @var{type} of the @var{types} that is defined, define @code{HAVE_@var{type}} (in all capitals). Each @var{type} must follow the rules of @code{AC_CHECK_TYPE}. If no @var{includes} are specified, the default includes are used (@pxref{Default Includes}). If @var{action-if-found} is given, it is additional shell code to execute when one of the types is found. If @var{action-if-not-found} is given, it is executed when one of the types is not found. This macro uses M4 lists: @example AC_CHECK_TYPES([ptrdiff_t]) AC_CHECK_TYPES([unsigned long long int, uintmax_t]) AC_CHECK_TYPES([float_t], [], [], [[#include <math.h>]]) @end example @end defmac Autoconf, up to 2.13, used to provide to another version of @code{AC_CHECK_TYPE}, broken by design. In order to keep backward compatibility, a simple heuristic, quite safe but not totally, is implemented. In case of doubt, read the documentation of the former @code{AC_CHECK_TYPE}, see @ref{Obsolete Macros}. @node Compilers and Preprocessors @section Compilers and Preprocessors @cindex Compilers @cindex Preprocessors @ovindex EXEEXT All the tests for compilers (@code{AC_PROG_CC}, @code{AC_PROG_CXX}, @code{AC_PROG_F77}) define the output variable @code{EXEEXT} based on the output of the compiler, typically to the empty string if Posix and @samp{.exe} if a DOS variant. @ovindex OBJEXT They also define the output variable @code{OBJEXT} based on the output of the compiler, after @file{.c} files have been excluded, typically to @samp{o} if Posix, @samp{obj} if a DOS variant. If the compiler being used does not produce executables, the tests fail. 
If the executables can't be run, and cross-compilation is not enabled, they fail too. @xref{Manual Configuration}, for more on support for cross compiling. @menu * Specific Compiler Characteristics:: Some portability issues * Generic Compiler Characteristics:: Language independent tests and features * C Compiler:: Checking its characteristics * C++ Compiler:: Likewise * Objective C Compiler:: Likewise * Objective C++ Compiler:: Likewise * Erlang Compiler and Interpreter:: Likewise * Fortran Compiler:: Likewise * Go Compiler:: Likewise @end menu @node Specific Compiler Characteristics @subsection Specific Compiler Characteristics Some compilers exhibit different behaviors. @table @asis @item Static/Dynamic Expressions Autoconf relies on a trick to extract one bit of information from the C compiler: using negative array sizes. For instance the following excerpt of a C source demonstrates how to test whether @samp{int} objects are 4 bytes wide: @example static int test_array[sizeof (int) == 4 ? 1 : -1]; @end example @noindent To our knowledge, there is a single compiler that does not support this trick: the HP C compilers (the real ones, not only the ``bundled'') on HP-UX 11.00. They incorrectly reject the above program with the diagnostic ``Variable-length arrays cannot have static storage.'' This bug comes from HP compilers' mishandling of @code{sizeof (int)}, not from the @code{? 1 : -1}, and Autoconf works around this problem by casting @code{sizeof (int)} to @code{long int} before comparing it. 
@end table @node Generic Compiler Characteristics @subsection Generic Compiler Characteristics @anchor{AC_CHECK_SIZEOF} @defmac AC_CHECK_SIZEOF (@var{type-or-expr}, @ovar{unused}, @ @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_SIZEOF} @cvindex SIZEOF_@var{type-or-expr} @caindex sizeof_@var{type-or-expr} Define @code{SIZEOF_@var{type-or-expr}} (@pxref{Standard Symbols}) to be the size in bytes of @var{type-or-expr}, which may be either a type or an expression returning a value that has a size. If the expression @samp{sizeof (@var{type-or-expr})} is invalid, the result is 0. @var{includes} is a series of include directives, defaulting to @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), which are used prior to the expression under test. This macro now works even when cross-compiling. The @var{unused} argument was used when cross-compiling. For example, the call @example @c If you change this example, adjust tests/semantics.at:AC_CHECK_SIZEOF struct. AC_CHECK_SIZEOF([int *]) @end example @noindent defines @code{SIZEOF_INT_P} to be 8 on DEC Alpha AXP systems. This macro caches its result in the @code{ac_cv_sizeof_@var{type-or-expr}} variable, with @samp{*} mapped to @samp{p} and other characters not suitable for a variable name mapped to underscores. @end defmac @defmac AC_CHECK_ALIGNOF (@var{type}, @dvar{includes, AC_INCLUDES_DEFAULT}) @acindex{CHECK_ALIGNOF} @cvindex ALIGNOF_@var{type} @caindex alignof_@var{type-or-expr} Define @code{ALIGNOF_@var{type}} (@pxref{Standard Symbols}) to be the alignment in bytes of @var{type}. @samp{@var{type} y;} must be valid as a structure member declaration. If @samp{type} is unknown, the result is 0. If no @var{includes} are specified, the default includes are used (@pxref{Default Includes}). This macro caches its result in the @code{ac_cv_alignof_@var{type-or-expr}} variable, with @samp{*} mapped to @samp{p} and other characters not suitable for a variable name mapped to underscores. 
@end defmac @defmac AC_COMPUTE_INT (@var{var}, @var{expression}, @ @dvar{includes, AC_INCLUDES_DEFAULT}, @ovar{action-if-fails}) @acindex{COMPUTE_INT} Store into the shell variable @var{var} the value of the integer @var{expression}. The value should fit in an initializer in a C variable of type @code{signed long}. To support cross compilation (in which case, the macro only works on hosts that use twos-complement arithmetic), it should be possible to evaluate the expression at compile-time. If no @var{includes} are specified, the default includes are used (@pxref{Default Includes}). Execute @var{action-if-fails} if the value cannot be determined correctly. @end defmac @defmac AC_LANG_WERROR @acindex{LANG_WERROR} Normally Autoconf ignores warnings generated by the compiler, linker, and preprocessor. If this macro is used, warnings count as fatal errors for the current language. This macro is useful when the results of configuration are used where warnings are unacceptable; for instance, if parts of a program are built with the GCC @option{-Werror} option. If the whole program is built using @option{-Werror} it is often simpler to put @option{-Werror} in the compiler flags (@code{CFLAGS}, etc.). @end defmac @defmac AC_OPENMP @acindex{OPENMP} @cvindex _OPENMP @ovindex OPENMP_CFLAGS @ovindex OPENMP_CXXFLAGS @ovindex OPENMP_FFLAGS @ovindex OPENMP_FCFLAGS @caindex prog_c_openmp @caindex prog_cxx_openmp @caindex prog_f77_openmp @caindex prog_fc_openmp @uref{http://@/www.openmp.org/, OpenMP} specifies extensions of C, C++, and Fortran that simplify optimization of shared memory parallelism, which is a common problem on multi-core CPUs. If the current language is C, the macro @code{AC_OPENMP} sets the variable @code{OPENMP_CFLAGS} to the C compiler flags needed for supporting OpenMP@. 
@code{OPENMP_CFLAGS} is set to empty if the compiler already supports OpenMP, if it has no way to activate OpenMP support, or if the user rejects OpenMP support by invoking @samp{configure} with the @samp{--disable-openmp} option. @code{OPENMP_CFLAGS} needs to be used when compiling programs, when preprocessing program source, and when linking programs. Therefore you need to add @code{$(OPENMP_CFLAGS)} to the @code{CFLAGS} of C programs that use OpenMP@. If you preprocess OpenMP-specific C code, you also need to add @code{$(OPENMP_CFLAGS)} to @code{CPPFLAGS}. The presence of OpenMP support is revealed at compile time by the preprocessor macro @code{_OPENMP}. Linking a program with @code{OPENMP_CFLAGS} typically adds one more shared library to the program's dependencies, so its use is recommended only on programs that actually require OpenMP. If the current language is C++, @code{AC_OPENMP} sets the variable @code{OPENMP_CXXFLAGS}, suitably for the C++ compiler. The same remarks hold as for C. If the current language is Fortran 77 or Fortran, @code{AC_OPENMP} sets the variable @code{OPENMP_FFLAGS} or @code{OPENMP_FCFLAGS}, respectively. Similar remarks as for C hold, except that @code{CPPFLAGS} is not used for Fortran, and no preprocessor macro signals OpenMP support. For portability, it is best to avoid spaces between @samp{#} and @samp{pragma omp}. That is, write @samp{#pragma omp}, not @samp{# pragma omp}. The Sun WorkShop 6.2 C compiler chokes on the latter. This macro caches its result in the @code{ac_cv_prog_c_openmp}, @code{ac_cv_prog_cxx_openmp}, @code{ac_cv_prog_f77_openmp}, or @code{ac_cv_prog_fc_openmp} variable, depending on the current language. @strong{Caution:} Some of the compiler options that @code{AC_OPENMP} tests, mean ``enable OpenMP'' to one compiler, but ``write output to a file named @file{mp} or @file{penmp}'' to other compilers. 
We cannot guarantee that the implementation of @code{AC_OPENMP} will not overwrite an existing file with either of these names. Therefore, as a defensive measure, a @command{configure} script that uses @code{AC_OPENMP} will issue an error and stop (before doing any of the operations that might overwrite these files) upon encountering either of these files in its working directory. @command{autoconf} will also issue an error if it finds either of these files in the same directory as a @file{configure.ac} that uses @code{AC_OPENMP}. If you have files with either of these names at the top level of your source tree, and you need to use @code{AC_OPENMP}, we recommend you either change their names or move them into a subdirectory. @end defmac @node C Compiler @subsection C Compiler Characteristics The following macros provide ways to find and exercise a C Compiler. There are a few constructs that ought to be avoided, but do not deserve being checked for, since they can easily be worked around. @table @asis @item Don't use lines containing solitary backslashes They tickle a bug in the HP-UX C compiler (checked on HP-UX 10.20, 11.00, and 11i). When given the following source: @example #ifdef __STDC__ /\ * A comment with backslash-newlines in it. %@{ %@} *\ \ / char str[] = "\\ " A string with backslash-newlines in it %@{ %@} \\ ""; char apostrophe = '\\ \ '\ '; #endif @end example @noindent the compiler incorrectly fails with the diagnostics ``Non-terminating comment at end of file'' and ``Missing @samp{#endif} at end of file.'' Removing the lines with solitary backslashes solves the problem. @item Don't compile several files at once if output matters to you Some compilers, such as HP's, report names of files being compiled when given more than one file operand. For instance: @example $ @kbd{cc a.c b.c} a.c: b.c: @end example @noindent This can cause problems if you observe the output of the compiler to detect failures. 
Invoking @samp{cc -c a.c && cc -c b.c && cc -o c a.o b.o} solves the issue. @item Don't rely on @code{#error} failing The IRIX C compiler does not fail when #error is preprocessed; it simply emits a diagnostic and continues, exiting successfully. So, instead of an error directive like @code{#error "Unsupported word size"} it is more portable to use an invalid directive like @code{#Unsupported word size} in Autoconf tests. In ordinary source code, @code{#error} is OK, since installers with inadequate compilers like IRIX can simply examine these compilers' diagnostic output. @item Don't rely on correct @code{#line} support On Solaris, @command{c89} (at least Sun C 5.3 through 5.8) diagnoses @code{#line} directives whose line numbers are greater than 32767. Nothing in Posix makes this invalid. That is why Autoconf stopped issuing @code{#line} directives. @end table @anchor{AC_PROG_CC} @defmac AC_PROG_CC (@ovar{compiler-search-list}) @acindex{PROG_CC} @evindex CC @evindex CFLAGS @ovindex CC @ovindex CFLAGS Determine a C compiler to use. If the environment variable @code{CC} is set, its value will be taken as the name of the C compiler to use. Otherwise, search for a C compiler under a series of likely names, trying @code{gcc} and @code{cc} first. Regardless, the output variable @code{CC} is set to the chosen compiler. If the optional first argument to the macro is used, it must be a whitespace-separated list of potential names for a C compiler, which overrides the built-in list. If no C compiler can be found, @command{configure} will error out. If the selected C compiler is found to be GNU C (regardless of its name), the shell variable @code{GCC} will be set to @samp{yes}. If the shell variable @code{CFLAGS} was not already set, it is set to @option{-g -O2} for the GNU C compiler (@option{-O2} on systems where GCC does not accept @option{-g}), or @option{-g} for other compilers. @code{CFLAGS} is then made an output variable. 
You can override the default for @code{CFLAGS} by inserting a shell default assignment between @code{AC_INIT} and @code{AC_PROG_CC}: @example : $@{CFLAGS="@var{options}"@} @end example where @var{options} are the appropriate set of options to use by default. (It is important to use this construct rather than a normal assignment, so that @code{CFLAGS} can still be overridden by the person building the package. @xref{Preset Output Variables}.) If necessary, options are added to @code{CC} to enable support for ISO Standard C features with extensions, preferring the newest edition of the C standard that is supported. Currently the newest edition Autoconf knows how to detect support for is ISO C 2011. After calling this macro you can check whether the C compiler has been set to accept standard C by inspecting the shell variable @code{ac_prog_cc_stdc}. Its value will be @samp{c11}, @samp{c99}, or @samp{c89}, respectively, if the C compiler has been set to use the 2011, 1999, or 1990 edition of the C standard, and @samp{no} if the compiler does not support compiling standard C at all. The tests for standard conformance are not comprehensive. They test the values of @code{__STDC__} and @code{__STDC_VERSION__}, and a representative sample of the language features added in each version of the C standard. They do not test the C standard library, because the C compiler might be generating code for a ``freestanding environment'' (in which most of the standard library is optional). If you need to know whether a particular C standard header exists, use @code{AC_CHECK_HEADER}. None of the options that may be added to @code{CC} by this macro enable @emph{strict} conformance to the C standard. In particular, system-specific extensions are not disabled. (For example, for GNU C, the @option{-std=gnu@var{nn}} options may be used, but not the @option{-std=c@var{nn}} options.) 
Many Autoconf macros use a compiler, and thus call @samp{AC_REQUIRE([AC_PROG_CC])} to ensure that the compiler has been determined before the body of the outermost @code{AC_DEFUN} macro. Although @code{AC_PROG_CC} is safe to directly expand multiple times, it performs certain checks (such as the proper value of @env{EXEEXT}) only on the first invocation. Therefore, care must be used when invoking this macro from within another macro rather than at the top level (@pxref{Expanded Before Required}). @end defmac @anchor{AC_PROG_CC_C_O} @defmac AC_PROG_CC_C_O @acindex{PROG_CC_C_O} @cvindex NO_MINUS_C_MINUS_O @caindex prog_cc_@var{compiler}_c_o If the C compiler does not accept the @option{-c} and @option{-o} options simultaneously, define @code{NO_MINUS_C_MINUS_O}. This macro actually tests both the compiler found by @code{AC_PROG_CC}, and, if different, the first @code{cc} in the path. The test fails if one fails. This macro was created for GNU Make to choose the default C compilation rule. For the compiler @var{compiler}, this macro caches its result in the @code{ac_cv_prog_cc_@var{compiler}_c_o} variable. @end defmac @defmac AC_PROG_CPP @acindex{PROG_CPP} @evindex CPP @ovindex CPP Set output variable @code{CPP} to a command that runs the C preprocessor. If @samp{$CC -E} doesn't work, tries @code{cpp} and @file{/lib/cpp}, in that order. It is only portable to run @code{CPP} on files with a @file{.c} extension. Some preprocessors don't indicate missing include files by the error status. For such preprocessors an internal variable is set that causes other macros to check the standard error from the preprocessor and consider the test failed if any warnings have been reported. For most preprocessors, though, warnings do not cause include-file tests to fail unless @code{AC_PROG_CPP_WERROR} is also specified. 
@end defmac @defmac AC_PROG_CPP_WERROR @acindex{PROG_CPP_WERROR} @ovindex CPP This acts like @code{AC_PROG_CPP}, except it treats warnings from the preprocessor as errors even if the preprocessor exit status indicates success. This is useful for avoiding headers that generate mandatory warnings, such as deprecation notices. @end defmac The following macros check for C compiler or machine architecture features. To check for characteristics not listed here, use @code{AC_COMPILE_IFELSE} (@pxref{Running the Compiler}) or @code{AC_RUN_IFELSE} (@pxref{Runtime}). @defmac AC_C_BACKSLASH_A @acindex{C_BACKSLASH_A} @cvindex HAVE_C_BACKSLASH_A Define @samp{HAVE_C_BACKSLASH_A} to 1 if the C compiler understands @samp{\a}. This macro is obsolescent, as current C compilers understand @samp{\a}. New programs need not use this macro. @end defmac @anchor{AC_C_BIGENDIAN} @defmac AC_C_BIGENDIAN (@ovar{action-if-true}, @ovar{action-if-false}, @ @ovar{action-if-unknown}, @ovar{action-if-universal}) @acindex{C_BIGENDIAN} @cvindex WORDS_BIGENDIAN @cindex Endianness If words are stored with the most significant byte first (like Motorola and SPARC CPUs), execute @var{action-if-true}. If words are stored with the least significant byte first (like Intel and VAX CPUs), execute @var{action-if-false}. This macro runs a test-case if endianness cannot be determined from the system header files. When cross-compiling, the test-case is not run but grep'ed for some magic values. @var{action-if-unknown} is executed if the latter case fails to determine the byte sex of the host system. In some cases a single run of a compiler can generate code for multiple architectures. This can happen, for example, when generating Mac OS X universal binary files, which work on both PowerPC and Intel architectures. In this case, the different variants might be for architectures with differing endianness. If @command{configure} detects this, it executes @var{action-if-universal} instead of @var{action-if-unknown}. 
The default for @var{action-if-true} is to define @samp{WORDS_BIGENDIAN}. The default for @var{action-if-false} is to do nothing. The default for @var{action-if-unknown} is to abort configure and tell the installer how to bypass this test. And finally, the default for @var{action-if-universal} is to ensure that @samp{WORDS_BIGENDIAN} is defined if and only if a universal build is detected and the current code is big-endian; this default works only if @command{autoheader} is used (@pxref{autoheader Invocation}). If you use this macro without specifying @var{action-if-universal}, you should also use @code{AC_CONFIG_HEADERS}; otherwise @samp{WORDS_BIGENDIAN} may be set incorrectly for Mac OS X universal binary files. @end defmac @anchor{AC_C_CONST} @defmac AC_C_CONST @acindex{C_CONST} @cvindex const @caindex c_const If the C compiler does not fully support the @code{const} keyword, define @code{const} to be empty. Some C compilers that do not define @code{__STDC__} do support @code{const}; some compilers that define @code{__STDC__} do not completely support @code{const}. Programs can simply use @code{const} as if every C compiler supported it; for those that don't, the makefile or configuration header file defines it as empty. Occasionally installers use a C++ compiler to compile C code, typically because they lack a C compiler. This causes problems with @code{const}, because C and C++ treat @code{const} differently. For example: @example const int foo; @end example @noindent is valid in C but not in C++. These differences unfortunately cannot be papered over by defining @code{const} to be empty. If @command{autoconf} detects this situation, it leaves @code{const} alone, as this generally yields better results in practice. However, using a C++ compiler to compile C code is not recommended or supported, and installers who run into trouble in this area should get a C compiler like GCC to compile their C code. 
This macro caches its result in the @code{ac_cv_c_const} variable. This macro is obsolescent, as current C compilers support @code{const}. New programs need not use this macro. @end defmac @defmac AC_C__GENERIC @acindex{C__GENERIC} @cvindex _Generic If the C compiler supports C11-style generic selection using the @code{_Generic} keyword, define @code{HAVE_C__GENERIC}. @end defmac @defmac AC_C_RESTRICT @acindex{C_RESTRICT} @cvindex restrict @caindex c_restrict If the C compiler recognizes a variant spelling for the @code{restrict} keyword (@code{__restrict}, @code{__restrict__}, or @code{_Restrict}), then define @code{restrict} to that; this is more likely to do the right thing with compilers that support language variants where plain @code{restrict} is not a keyword. Otherwise, if the C compiler recognizes the @code{restrict} keyword, don't do anything. Otherwise, define @code{restrict} to be empty. Thus, programs may simply use @code{restrict} as if every C compiler supported it; for those that do not, the makefile or configuration header defines it away. Although support in C++ for the @code{restrict} keyword is not required, several C++ compilers do accept the keyword. This macro works for them, too. This macro caches @samp{no} in the @code{ac_cv_c_restrict} variable if @code{restrict} is not supported, and a supported spelling otherwise. @end defmac @defmac AC_C_VOLATILE @acindex{C_VOLATILE} @cvindex volatile If the C compiler does not understand the keyword @code{volatile}, define @code{volatile} to be empty. Programs can simply use @code{volatile} as if every C compiler supported it; for those that do not, the makefile or configuration header defines it as empty. If the correctness of your program depends on the semantics of @code{volatile}, simply defining it to be empty does, in a sense, break your code. However, given that the compiler does not support @code{volatile}, you are at its mercy anyway. At least your program compiles, when it wouldn't before. 
@xref{Volatile Objects}, for more about @code{volatile}. In general, the @code{volatile} keyword is a standard C feature, so you might expect that @code{volatile} is available only when @code{__STDC__} is defined. However, Ultrix 4.3's native compiler does support volatile, but does not define @code{__STDC__}. This macro is obsolescent, as current C compilers support @code{volatile}. New programs need not use this macro. @end defmac @anchor{AC_C_INLINE} @defmac AC_C_INLINE @acindex{C_INLINE} @cvindex inline If the C compiler supports the keyword @code{inline}, do nothing. Otherwise define @code{inline} to @code{__inline__} or @code{__inline} if it accepts one of those, otherwise define @code{inline} to be empty. @end defmac @anchor{AC_C_CHAR_UNSIGNED} @defmac AC_C_CHAR_UNSIGNED @acindex{C_CHAR_UNSIGNED} @cvindex __CHAR_UNSIGNED__ If the C type @code{char} is unsigned, define @code{__CHAR_UNSIGNED__}, unless the C compiler predefines it. These days, using this macro is not necessary. The same information can be determined by this portable alternative, thus avoiding the use of preprocessor macros in the namespace reserved for the implementation. @example #include <limits.h> #if CHAR_MIN == 0 # define CHAR_UNSIGNED 1 #endif @end example @end defmac @defmac AC_C_STRINGIZE @acindex{C_STRINGIZE} @cvindex HAVE_STRINGIZE If the C preprocessor supports the stringizing operator, define @code{HAVE_STRINGIZE}. The stringizing operator is @samp{#} and is found in macros such as this: @example #define x(y) #y @end example This macro is obsolescent, as current C compilers support the stringizing operator. New programs need not use this macro. @end defmac @defmac AC_C_FLEXIBLE_ARRAY_MEMBER @acindex{C_FLEXIBLE_ARRAY_MEMBER} @cvindex FLEXIBLE_ARRAY_MEMBER If the C compiler supports flexible array members, define @code{FLEXIBLE_ARRAY_MEMBER} to nothing; otherwise define it to 1. 
That way, a declaration like this: @example struct s @{ size_t n_vals; double val[FLEXIBLE_ARRAY_MEMBER]; @}; @end example @noindent will let applications use the ``struct hack'' even with compilers that do not support flexible array members. To allocate and use such an object, you can use code like this: @example size_t i; size_t n = compute_value_count (); struct s *p = malloc (offsetof (struct s, val) + n * sizeof (double)); p->n_vals = n; for (i = 0; i < n; i++) p->val[i] = compute_value (i); @end example @end defmac @defmac AC_C_VARARRAYS @acindex{C_VARARRAYS} @cvindex __STDC_NO_VLA__ @cvindex HAVE_C_VARARRAYS If the C compiler does not support variable-length arrays, define the macro @code{__STDC_NO_VLA__} to be 1 if it is not already defined. A variable-length array is an array of automatic storage duration whose length is determined at run time, when the array is declared. For backward compatibility this macro also defines @code{HAVE_C_VARARRAYS} if the C compiler supports variable-length arrays, but this usage is obsolescent and new programs should use @code{__STDC_NO_VLA__}. @end defmac @defmac AC_C_TYPEOF @acindex{C_TYPEOF} @cvindex HAVE_TYPEOF @cvindex typeof If the C compiler supports GNU C's @code{typeof} syntax either directly or through a different spelling of the keyword (e.g., @code{__typeof__}), define @code{HAVE_TYPEOF}. If the support is available only through a different spelling, define @code{typeof} to that spelling. @end defmac @defmac AC_C_PROTOTYPES @acindex{C_PROTOTYPES} @cvindex PROTOTYPES @cvindex __PROTOTYPES @cvindex PARAMS If function prototypes are understood by the compiler (as determined by @code{AC_PROG_CC}), define @code{PROTOTYPES} and @code{__PROTOTYPES}. Defining @code{__PROTOTYPES} is for the benefit of header files that cannot use macros that infringe on user name space. This macro is obsolescent, as current C compilers support prototypes. New programs need not use this macro. 
@end defmac @anchor{AC_PROG_GCC_TRADITIONAL} @defmac AC_PROG_GCC_TRADITIONAL @acindex{PROG_GCC_TRADITIONAL} @ovindex CC Add @option{-traditional} to output variable @code{CC} if using a GNU C compiler and @code{ioctl} does not work properly without @option{-traditional}. That usually happens when the fixed header files have not been installed on an old system. This macro is obsolescent, since current versions of the GNU C compiler fix the header files automatically when installed. @end defmac @node C++ Compiler @subsection C++ Compiler Characteristics @defmac AC_PROG_CXX (@ovar{compiler-search-list}) @acindex{PROG_CXX} @evindex CXX @evindex CXXFLAGS @ovindex CXX @ovindex CXXFLAGS Determine a C++ compiler to use. If either the environment variable @code{CXX} or the environment variable @code{CCC} is set, its value will be taken as the name of a C++ compiler. If both are set, @code{CXX} is preferred. If neither are set, search for a C++ compiler under a series of likely names, trying @code{g++} and @code{c++} first. Regardless, the output variable @code{CXX} is set to the chosen compiler. If the optional first argument to the macro is used, it must be a whitespace-separated list of potential names for a C++ compiler, which overrides the built-in list. If no C++ compiler can be found, as a last resort @code{CXX} is set to @code{g++} (and subsequent tests will probably fail). If the selected C++ compiler is found to be GNU C++ (regardless of its name), the shell variable @code{GXX} will be set to @samp{yes}. If the shell variable @code{CXXFLAGS} was not already set, it is set to @option{-g -O2} for the GNU C++ compiler (@option{-O2} on systems where G++ does not accept @option{-g}), or @option{-g} for other compilers. @code{CXXFLAGS} is then made an output variable. 
You can override the default for @code{CXXFLAGS} by inserting a shell default assignment between @code{AC_INIT} and @code{AC_PROG_CXX}: @example : $@{CXXFLAGS="@var{options}"@} @end example where @var{options} are the appropriate set of options to use by default. (It is important to use this construct rather than a normal assignment, so that @code{CXXFLAGS} can still be overridden by the person building the package. @xref{Preset Output Variables}.) If necessary, options are added to @code{CXX} to enable support for ISO Standard C++ features with extensions, preferring the newest edition of the C++ standard that is supported. Currently the newest edition Autoconf knows how to detect support for is ISO C++ 2011. After calling this macro, you can check whether the C++ compiler has been set to accept standard C++ by inspecting the shell variable @code{ac_prog_cc_stdc}. Its value will be @samp{cxx11} or @samp{cxx98}, respectively, if the C++ compiler has been set to use the 2011 or 1990 edition of the C++ standard, and @samp{no} if the compiler does not support compiling standard C++ at all. The tests for standard conformance are not comprehensive. They test the value of @code{__cplusplus} and a representative sample of the language features added in each version of the C++ standard. They do not test the C++ standard library, because this can be extremely slow, and because the C++ compiler might be generating code for a ``freestanding environment'' (in which most of the C++ standard library is optional). If you need to know whether a particular C++ standard header exists, use @code{AC_CHECK_HEADER}. None of the options that may be added to @code{CXX} by this macro enable @emph{strict} conformance to the C++ standard. In particular, system-specific extensions are not disabled. (For example, for GNU C++, the @option{-std=gnu++@var{nn}} options may be used, but not the @option{-std=c++@var{nn}} options.) 
@end defmac @defmac AC_PROG_CXXCPP @acindex{PROG_CXXCPP} @evindex CXXCPP @ovindex CXXCPP Set output variable @code{CXXCPP} to a command that runs the C++ preprocessor. If @samp{$CXX -E} doesn't work, tries @code{cpp} and @file{/lib/cpp}, in that order. Because of this fallback, @code{CXXCPP} may or may not set C++-specific predefined macros (such as @code{__cplusplus}). It is portable to run @code{CXXCPP} only on files with a @file{.c}, @file{.C}, @file{.cc}, or @file{.cpp} extension. Some preprocessors don't indicate missing include files by the error status. For such preprocessors an internal variable is set that causes other macros to check the standard error from the preprocessor and consider the test failed if any warnings have been reported. However, it is not known whether such broken preprocessors exist for C++. @end defmac @defmac AC_PROG_CXX_C_O @acindex{PROG_CXX_C_O} @cvindex CXX_NO_MINUS_C_MINUS_O Test whether the C++ compiler accepts the options @option{-c} and @option{-o} simultaneously, and define @code{CXX_NO_MINUS_C_MINUS_O}, if it does not. @end defmac @node Objective C Compiler @subsection Objective C Compiler Characteristics @defmac AC_PROG_OBJC (@ovar{compiler-search-list}) @acindex{PROG_OBJC} @evindex OBJC @evindex OBJCFLAGS @ovindex OBJC @ovindex OBJCFLAGS Determine an Objective C compiler to use. If @code{OBJC} is not already set in the environment, check for Objective C compilers. Set output variable @code{OBJC} to the name of the compiler found. This macro may, however, be invoked with an optional first argument which, if specified, must be a blank-separated list of Objective C compilers to search for. This just gives the user an opportunity to specify an alternative search list for the Objective C compiler. 
Because of this fallback, @code{OBJCPP} may or may not set Objective-C-specific predefined macros (such as @code{__OBJC__}).
Because of this fallback, @code{OBJCXXCPP} may or may not set Objective-C++-specific predefined macros (such as @code{__cplusplus} and @code{__OBJC__}).
@end defmac @defmac AC_ERLANG_PATH_ERL (@ovar{value-if-not-found}, @dvar{path, $PATH}) @acindex{ERLANG_PATH_ERL} @evindex ERL @ovindex ERL Determine an Erlang interpreter to use. If @code{ERL} is not already set in the environment, check for @command{erl}. Set output variable @code{ERL} to the complete path of the interpreter command found. The two optional arguments have the same meaning as the two last arguments of macro @code{AC_PATH_PROG} for looking for the @command{erl} program. For example, to look for @command{erl} only in the @file{/usr/lib/erlang/bin} directory: @example AC_ERLANG_PATH_ERL([not found], [/usr/lib/erlang/bin]) @end example @end defmac @defmac AC_ERLANG_NEED_ERL (@dvar{path, $PATH}) @acindex{ERLANG_NEED_ERL} A simplified variant of the @code{AC_ERLANG_PATH_ERL} macro, that prints an error message and exits the @command{configure} script if the @command{erl} program is not found. @end defmac @node Fortran Compiler @subsection Fortran Compiler Characteristics @cindex Fortran @cindex F77 The Autoconf Fortran support is divided into two categories: legacy Fortran 77 macros (@code{F77}), and modern Fortran macros (@code{FC}). The former are intended for traditional Fortran 77 code, and have output variables like @code{F77}, @code{FFLAGS}, and @code{FLIBS}. The latter are for newer programs that can (or must) compile under the newer Fortran standards, and have output variables like @code{FC}, @code{FCFLAGS}, and @code{FCLIBS}. Except for the macros @code{AC_FC_SRCEXT}, @code{AC_FC_FREEFORM}, @code{AC_FC_FIXEDFORM}, and @code{AC_FC_LINE_LENGTH} (see below), the @code{FC} and @code{F77} macros behave almost identically, and so they are documented together in this section. @defmac AC_PROG_F77 (@ovar{compiler-search-list}) @acindex{PROG_F77} @evindex F77 @evindex FFLAGS @ovindex F77 @ovindex FFLAGS @caindex f77_compiler_gnu @caindex prog_f77_g Determine a Fortran 77 compiler to use. 
If the output variable @code{FFLAGS} was not already set in the environment, set it to @option{-g -O2} for @code{g77} (or @option{-O2} where the GNU Fortran 77 compiler does not accept @option{-g}), or @option{-g} for other compilers.
If the output variable @code{FCFLAGS} was not already set in the environment, then set it to @option{-g -O2} for a GNU Fortran compiler (or @option{-O2} where the compiler does not accept @option{-g}), or @option{-g} for other compilers.
These macros internally compute the flag needed to get verbose linking output and cache it in @code{ac_cv_prog_f77_v} or @code{ac_cv_prog_fc_v} variables, respectively.
@end defmac @defmac AC_F77_DUMMY_MAIN (@ovar{action-if-found}, @ @dvar{action-if-not-found, AC_MSG_FAILURE}) @defmacx AC_FC_DUMMY_MAIN (@ovar{action-if-found}, @ @dvar{action-if-not-found, AC_MSG_FAILURE}) @acindex{F77_DUMMY_MAIN} @cvindex F77_DUMMY_MAIN @acindex{FC_DUMMY_MAIN} @cvindex FC_DUMMY_MAIN @caindex f77_dummy_main @caindex fc_dummy_main With many compilers, the Fortran libraries detected by @code{AC_F77_LIBRARY_LDFLAGS} or @code{AC_FC_LIBRARY_LDFLAGS} provide their own @code{main} entry function that initializes things like Fortran I/O, and which then calls a user-provided entry function named (say) @code{MAIN__} to run the user's program. The @code{AC_F77_DUMMY_MAIN} and @code{AC_FC_DUMMY_MAIN} or @code{AC_F77_MAIN} and @code{AC_FC_MAIN} macros figure out how to deal with this interaction. When using Fortran for purely numerical functions (no I/O, etc.)@: often one prefers to provide one's own @code{main} and skip the Fortran library initializations. In this case, however, one may still need to provide a dummy @code{MAIN__} routine in order to prevent linking errors on some systems. @code{AC_F77_DUMMY_MAIN} or @code{AC_FC_DUMMY_MAIN} detects whether any such routine is @emph{required} for linking, and what its name is; the shell variable @code{F77_DUMMY_MAIN} or @code{FC_DUMMY_MAIN} holds this name, @code{unknown} when no solution was found, and @code{none} when no such dummy main is needed. By default, @var{action-if-found} defines @code{F77_DUMMY_MAIN} or @code{FC_DUMMY_MAIN} to the name of this routine (e.g., @code{MAIN__}) @emph{if} it is required. @var{action-if-not-found} defaults to exiting with an error. In order to link with Fortran routines, the user's C/C++ program should then include the following code to define the dummy main if it is needed: @example @c If you change this example, adjust tests/fortran.at:AC_F77_DUMMY_MAIN usage. 
#ifdef F77_DUMMY_MAIN # ifdef __cplusplus extern "C" # endif int F77_DUMMY_MAIN () @{ return 1; @} #endif @end example (Replace @code{F77} with @code{FC} for Fortran instead of Fortran 77.) Note that this macro is called automatically from @code{AC_F77_WRAPPERS} or @code{AC_FC_WRAPPERS}; there is generally no need to call it explicitly unless one wants to change the default actions. The result of this macro is cached in the @code{ac_cv_f77_dummy_main} or @code{ac_cv_fc_dummy_main} variable, respectively. @end defmac @defmac AC_F77_MAIN @defmacx AC_FC_MAIN @acindex{F77_MAIN} @cvindex F77_MAIN @acindex{FC_MAIN} @cvindex FC_MAIN @caindex f77_main @caindex fc_main As discussed above, many Fortran libraries allow you to provide an entry point called (say) @code{MAIN__} instead of the usual @code{main}, which is then called by a @code{main} function in the Fortran libraries that initializes things like Fortran I/O@. The @code{AC_F77_MAIN} and @code{AC_FC_MAIN} macros detect whether it is @emph{possible} to utilize such an alternate main function, and defines @code{F77_MAIN} and @code{FC_MAIN} to the name of the function. (If no alternate main function name is found, @code{F77_MAIN} and @code{FC_MAIN} are simply defined to @code{main}.) Thus, when calling Fortran routines from C that perform things like I/O, one should use this macro and declare the "main" function like so: @example @c If you change this example, adjust tests/fortran.at:AC_F77_DUMMY_MAIN usage. #ifdef __cplusplus extern "C" #endif int F77_MAIN (int argc, char *argv[]); @end example (Again, replace @code{F77} with @code{FC} for Fortran instead of Fortran 77.) The result of this macro is cached in the @code{ac_cv_f77_main} or @code{ac_cv_fc_main} variable, respectively. 
@end defmac @defmac AC_F77_WRAPPERS @defmacx AC_FC_WRAPPERS @acindex{F77_WRAPPERS} @cvindex F77_FUNC @cvindex F77_FUNC_ @acindex{FC_WRAPPERS} @cvindex FC_FUNC @cvindex FC_FUNC_ @caindex f77_mangling @caindex fc_mangling Defines C macros @code{F77_FUNC (name, NAME)}, @code{FC_FUNC (name, NAME)}, @code{F77_FUNC_(name, NAME)}, and @code{FC_FUNC_(name, NAME)} to properly mangle the names of C/C++ identifiers, and identifiers with underscores, respectively, so that they match the name-mangling scheme used by the Fortran compiler. Fortran is case-insensitive, and in order to achieve this the Fortran compiler converts all identifiers into a canonical case and format. To call a Fortran subroutine from C or to write a C function that is callable from Fortran, the C program must explicitly use identifiers in the format expected by the Fortran compiler. In order to do this, one simply wraps all C identifiers in one of the macros provided by @code{AC_F77_WRAPPERS} or @code{AC_FC_WRAPPERS}. For example, suppose you have the following Fortran 77 subroutine: @example @c If you change this example, adjust tests/fortran.at:AC_F77_DUMMY_MAIN usage. subroutine foobar (x, y) double precision x, y y = 3.14159 * x return end @end example You would then declare its prototype in C or C++ as: @example @c If you change this example, adjust tests/fortran.at:AC_F77_DUMMY_MAIN usage. #define FOOBAR_F77 F77_FUNC (foobar, FOOBAR) #ifdef __cplusplus extern "C" /* prevent C++ name mangling */ #endif void FOOBAR_F77 (double *x, double *y); @end example Note that we pass both the lowercase and uppercase versions of the function name to @code{F77_FUNC} so that it can select the right one. Note also that all parameters to Fortran 77 routines are passed as pointers (@pxref{Mixing Fortran 77 With C and C++, , , automake, GNU Automake}). (Replace @code{F77} with @code{FC} for Fortran instead of Fortran 77.) 
Although Autoconf tries to be intelligent about detecting the name-mangling scheme of the Fortran compiler, there may be Fortran compilers that it doesn't support yet. In this case, the above code generates a compile-time error, but some other behavior (e.g., disabling Fortran-related features) can be induced by checking whether @code{F77_FUNC} or @code{FC_FUNC} is defined. Now, to call that routine from a C program, we would do something like: @example @c If you change this example, adjust tests/fortran.at:AC_F77_DUMMY_MAIN usage. @{ double x = 2.7183, y; FOOBAR_F77 (&x, &y); @} @end example If the Fortran identifier contains an underscore (e.g., @code{foo_bar}), you should use @code{F77_FUNC_} or @code{FC_FUNC_} instead of @code{F77_FUNC} or @code{FC_FUNC} (with the same arguments). This is because some Fortran compilers mangle names differently if they contain an underscore. The name mangling scheme is encoded in the @code{ac_cv_f77_mangling} or @code{ac_cv_fc_mangling} cache variable, respectively, and also used for the @code{AC_F77_FUNC} and @code{AC_FC_FUNC} macros described below. @end defmac @defmac AC_F77_FUNC (@var{name}, @ovar{shellvar}) @defmacx AC_FC_FUNC (@var{name}, @ovar{shellvar}) @acindex{F77_FUNC} @acindex{FC_FUNC} Given an identifier @var{name}, set the shell variable @var{shellvar} to hold the mangled version @var{name} according to the rules of the Fortran linker (see also @code{AC_F77_WRAPPERS} or @code{AC_FC_WRAPPERS}). @var{shellvar} is optional; if it is not supplied, the shell variable is simply @var{name}. The purpose of this macro is to give the caller a way to access the name-mangling information other than through the C preprocessor as above, for example, to call Fortran routines from some language other than C/C++. 
@end defmac @defmac AC_FC_SRCEXT (@var{ext}, @ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @defmacx AC_FC_PP_SRCEXT (@var{ext}, @ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_SRCEXT} @acindex{FC_PP_SRCEXT} @caindex fc_srcext_@var{ext} @caindex fc_pp_srcext_@var{ext} By default, the @code{FC} macros perform their tests using a @file{.f} extension for source-code files. Some compilers, however, only enable newer language features for appropriately named files, e.g., Fortran 90 features only for @file{.f90} files, or preprocessing only with @file{.F} files or maybe other upper-case extensions. On the other hand, some other compilers expect all source files to end in @file{.f} and require special flags to support other file name extensions. The @code{AC_FC_SRCEXT} and @code{AC_FC_PP_SRCEXT} macros deal with these issues. The @code{AC_FC_SRCEXT} macro tries to get the @code{FC} compiler to accept files ending with the extension @file{.@var{ext}} (i.e., @var{ext} does @emph{not} contain the dot). If any special compiler flags are needed for this, it stores them in the output variable @code{FCFLAGS_@var{ext}}. This extension and these flags are then used for all subsequent @code{FC} tests (until @code{AC_FC_SRCEXT} or @code{AC_FC_PP_SRCEXT} is called another time). For example, you would use @code{AC_FC_SRCEXT(f90)} to employ the @file{.f90} extension in future tests, and it would set the @code{FCFLAGS_f90} output variable with any extra flags that are needed to compile such files. Similarly, the @code{AC_FC_PP_SRCEXT} macro tries to get the @code{FC} compiler to preprocess and compile files with the extension @file{.@var{ext}}. When both @command{fpp} and @command{cpp} style preprocessing are provided, the former is preferred, as the latter may treat continuation lines, @code{//} tokens, and white space differently from what some Fortran dialects expect. 
Conversely, if you do not want files to be preprocessed, use only lower-case characters in the file name extension. Like with @code{AC_FC_SRCEXT(f90)}, any needed flags are stored in the @code{FCFLAGS_@var{ext}} variable. The @code{FCFLAGS_@var{ext}} flags can @emph{not} be simply absorbed into @code{FCFLAGS}, for two reasons based on the limitations of some compilers. First, only one @code{FCFLAGS_@var{ext}} can be used at a time, so files with different extensions must be compiled separately. Second, @code{FCFLAGS_@var{ext}} must appear @emph{immediately} before the source-code file name when compiling. So, continuing the example above, you might compile a @file{foo.f90} file in your makefile with the command: @example foo.o: foo.f90 $(FC) -c $(FCFLAGS) $(FCFLAGS_f90) '$(srcdir)/foo.f90' @end example If @code{AC_FC_SRCEXT} or @code{AC_FC_PP_SRCEXT} succeeds in compiling files with the @var{ext} extension, it calls @var{action-if-success} (defaults to nothing). If it fails, and cannot find a way to make the @code{FC} compiler accept such files, it calls @var{action-if-failure} (defaults to exiting with an error message). The @code{AC_FC_SRCEXT} and @code{AC_FC_PP_SRCEXT} macros cache their results in @code{ac_cv_fc_srcext_@var{ext}} and @code{ac_cv_fc_pp_srcext_@var{ext}} variables, respectively. @end defmac @defmac AC_FC_PP_DEFINE (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_PP_DEFINE} @caindex fc_pp_define Find a flag to specify defines for preprocessed Fortran. Not all Fortran compilers use @option{-D}. Substitute @code{FC_DEFINE} with the result and call @var{action-if-success} (defaults to nothing) if successful, and @var{action-if-failure} (defaults to failing with an error message) if not. This macro calls @code{AC_FC_PP_SRCEXT([F])} in order to learn how to preprocess a @file{conftest.F} file, but restores a previously used Fortran source file extension afterwards again. 
This macro is needed for some compiler alias names like @command{xlf95} which assume free-form source code by default, and in case you want to use fixed-form source with an extension like @file{.f90} which many compilers interpret as free-form by default.
If you specify a different extension with @code{AC_FC_SRCEXT}, such as @file{.f}, then @code{AC_FC_FIXEDFORM} ordinarily succeeds without modifying @code{FCFLAGS}. If @code{AC_FC_FIXEDFORM} succeeds in compiling fixed-form source, it calls @var{action-if-success} (defaults to nothing). If it fails, it calls @var{action-if-failure} (defaults to exiting with an error message). The result of this test, or @samp{none} or @samp{unknown}, is cached in the @code{ac_cv_fc_fixedform} variable. @end defmac @defmac AC_FC_LINE_LENGTH (@ovar{length}, @ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_LINE_LENGTH} @caindex fc_line_length Try to ensure that the Fortran compiler (@code{$FC}) accepts long source code lines. The @var{length} argument may be given as 80, 132, or unlimited, and defaults to 132. Note that line lengths above 250 columns are not portable, and some compilers do not accept more than 132 columns at least for fixed format source. If necessary, it may add some additional flags to @code{FCFLAGS}. If @code{AC_FC_LINE_LENGTH} succeeds in compiling fixed-form source, it calls @var{action-if-success} (defaults to nothing). If it fails, it calls @var{action-if-failure} (defaults to exiting with an error message). The result of this test, or @samp{none} or @samp{unknown}, is cached in the @code{ac_cv_fc_line_length} variable. @end defmac @defmac AC_FC_CHECK_BOUNDS (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_CHECK_BOUNDS} @caindex fc_check_bounds The @code{AC_FC_CHECK_BOUNDS} macro tries to enable array bounds checking in the Fortran compiler. If successful, the @var{action-if-success} is called and any needed flags are added to @code{FCFLAGS}. Otherwise, @var{action-if-failure} is called, which defaults to failing with an error message. The macro currently requires Fortran 90 or a newer dialect. The result of the macro is cached in the @code{ac_cv_fc_check_bounds} variable. 
@end defmac @defmac AC_F77_IMPLICIT_NONE (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @defmacx AC_FC_IMPLICIT_NONE (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{F77_IMPLICIT_NONE} @acindex{FC_IMPLICIT_NONE} @caindex f77_implicit_none @caindex fc_implicit_none Try to disallow implicit declarations in the Fortran compiler. If successful, @var{action-if-success} is called and any needed flags are added to @code{FFLAGS} or @code{FCFLAGS}, respectively. Otherwise, @var{action-if-failure} is called, which defaults to failing with an error message. The result of these macros are cached in the @code{ac_cv_f77_implicit_none} and @code{ac_cv_fc_implicit_none} variables, respectively. @end defmac @defmac AC_FC_MODULE_EXTENSION @acindex{FC_MODULE_EXTENSION} @caindex fc_module_ext @ovindex FC_MODEXT Find the Fortran 90 module file name extension. Most Fortran 90 compilers store module information in files separate from the object files. The module files are usually named after the name of the module rather than the source file name, with characters possibly turned to upper case, plus an extension, often @file{.mod}. Not all compilers use module files at all, or by default. The Cray Fortran compiler requires @option{-e m} in order to store and search module information in @file{.mod} files rather than in object files. Likewise, the Fujitsu Fortran compilers uses the @option{-Am} option to indicate how module information is stored. The @code{AC_FC_MODULE_EXTENSION} macro computes the module extension without the leading dot, and stores that in the @code{FC_MODEXT} variable. If the compiler does not produce module files, or the extension cannot be determined, @code{FC_MODEXT} is empty. 
Typically, the result of this macro may be used in cleanup @command{make} rules as follows: @example clean-modules: -test -z "$(FC_MODEXT)" || rm -f *.$(FC_MODEXT) @end example The extension, or @samp{unknown}, is cached in the @code{ac_cv_fc_module_ext} variable. @end defmac @defmac AC_FC_MODULE_FLAG (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_MODULE_FLAG} @caindex fc_module_flag @ovindex FC_MODINC @ovindex ac_empty Find the compiler flag to include Fortran 90 module information from another directory, and store that in the @code{FC_MODINC} variable. Call @var{action-if-success} (defaults to nothing) if successful, and set @code{FC_MODINC} to empty and call @var{action-if-failure} (defaults to exiting with an error message) if not. Most Fortran 90 compilers provide a way to specify module directories. Some have separate flags for the directory to write module files to, and directories to search them in, whereas others only allow writing to the current directory or to the first directory specified in the include path. Further, with some compilers, the module search path and the preprocessor search path can only be modified with the same flag. Thus, for portability, write module files to the current directory only and list that as first directory in the search path. There may be no whitespace between @code{FC_MODINC} and the following directory name, but @code{FC_MODINC} may contain trailing white space. For example, if you use Automake and would like to search @file{../lib} for module files, you can use the following: @example AM_FCFLAGS = $(FC_MODINC). $(FC_MODINC)../lib @end example Inside @command{configure} tests, you can use: @example if test -n "$FC_MODINC"; then FCFLAGS="$FCFLAGS $FC_MODINC. $FC_MODINC../lib" fi @end example The flag is cached in the @code{ac_cv_fc_module_flag} variable. 
The substituted value of @code{FC_MODINC} may refer to the @code{ac_empty} dummy placeholder empty variable, to avoid losing the significant trailing whitespace in a @file{Makefile}. @end defmac @defmac AC_FC_MODULE_OUTPUT_FLAG (@ovar{action-if-success}, @ @dvar{action-if-failure, AC_MSG_FAILURE}) @acindex{FC_MODULE_OUTPUT_FLAG} @caindex fc_module_output_flag @ovindex FC_MODOUT Find the compiler flag to write Fortran 90 module information to another directory, and store that in the @code{FC_MODOUT} variable. Call @var{action-if-success} (defaults to nothing) if successful, and set @code{FC_MODOUT} to empty and call @var{action-if-failure} (defaults to exiting with an error message) if not. Not all Fortran 90 compilers write module files, and of those that do, not all allow writing to a directory other than the current one, nor do all have separate flags for writing and reading; see the description of @code{AC_FC_MODULE_FLAG} above. If you need to be able to write to another directory, for maximum portability use @code{FC_MODOUT} before any @code{FC_MODINC} and include both the current directory and the one you write to in the search path: @example AM_FCFLAGS = $(FC_MODOUT)../mod $(FC_MODINC)../mod $(FC_MODINC). @dots{} @end example The flag is cached in the @code{ac_cv_fc_module_output_flag} variable. The substituted value of @code{FC_MODOUT} may refer to the @code{ac_empty} dummy placeholder empty variable, to avoid losing the significant trailing whitespace in a @file{Makefile}. @end defmac @node Go Compiler @subsection Go Compiler Characteristics @cindex Go Autoconf provides basic support for the Go programming language when using the @code{gccgo} compiler (there is currently no support for the @code{6g} and @code{8g} compilers). @defmac AC_PROG_GO (@ovar{compiler-search-list}) Find the Go compiler to use. Check whether the environment variable @code{GOC} is set; if so, then set output variable @code{GOC} to its value. 
Otherwise, if the macro is invoked without an argument, then search for a Go compiler named @code{gccgo}. If it is not found, then as a last resort set @code{GOC} to @code{gccgo}. This macro may be invoked with an optional first argument which, if specified, must be a blank-separated list of Go compilers to search for. If output variable @code{GOFLAGS} was not already set, set it to @option{-g -O2}. If your package does not like this default, @code{GOFLAGS} may be set before @code{AC_PROG_GO}. @end defmac @node System Services @section System Services The following macros check for operating system services or capabilities. @anchor{AC_PATH_X} @defmac AC_PATH_X @acindex{PATH_X} @evindex XMKMF @cindex X Window System Try to locate the X Window System include files and libraries. If the user gave the command line options @option{--x-includes=@var{dir}} and @option{--x-libraries=@var{dir}}, use those directories. If either or both were not given, get the missing values by running @code{xmkmf} (or an executable pointed to by the @code{XMKMF} environment variable) on a trivial @file{Imakefile} and examining the makefile that it produces. Setting @code{XMKMF} to @samp{false} disables this method. If this method fails to find the X Window System, @command{configure} looks for the files in several directories where they often reside. If either method is successful, set the shell variables @code{x_includes} and @code{x_libraries} to their locations, unless they are in directories the compiler searches by default. If both methods fail, or the user gave the command line option @option{--without-x}, set the shell variable @code{no_x} to @samp{yes}; otherwise set it to the empty string. @end defmac @anchor{AC_PATH_XTRA} @defmac AC_PATH_XTRA @acindex{PATH_XTRA} @ovindex X_CFLAGS @ovindex X_LIBS @ovindex X_EXTRA_LIBS @ovindex X_PRE_LIBS @cvindex X_DISPLAY_MISSING An enhanced version of @code{AC_PATH_X}. 
It adds the C compiler flags that X needs to output variable @code{X_CFLAGS}, and the X linker flags to @code{X_LIBS}. Define @code{X_DISPLAY_MISSING} if X is not available. This macro also checks for special libraries that some systems need in order to compile X programs. It adds any that the system needs to output variable @code{X_EXTRA_LIBS}. And it checks for special X11R6 libraries that need to be linked with before @option{-lX11}, and adds any found to the output variable @code{X_PRE_LIBS}. @c This is an incomplete kludge. Make a real way to do it. @c If you need to check for other X functions or libraries yourself, then @c after calling this macro, add the contents of @code{X_EXTRA_LIBS} to @c @code{LIBS} temporarily, like this: (FIXME - add example) @end defmac @anchor{AC_SYS_INTERPRETER} @defmac AC_SYS_INTERPRETER @acindex{SYS_INTERPRETER} Check whether the system supports starting scripts with a line of the form @samp{#!/bin/sh} to select the interpreter to use for the script. After running this macro, shell code in @file{configure.ac} can check the shell variable @code{interpval}; it is set to @samp{yes} if the system supports @samp{#!}, @samp{no} if not. @end defmac @defmac AC_SYS_LARGEFILE @acindex{SYS_LARGEFILE} @cvindex _FILE_OFFSET_BITS @cvindex _LARGE_FILES @ovindex CC @cindex Large file support @cindex LFS Arrange for 64-bit file offsets, known as @uref{http://@/www.unix.org/@/version2/@/whatsnew/@/lfs20mar.html, large-file support}. On some hosts, one must use special compiler options to build programs that can access large files. Append any such options to the output variable @code{CC}. Define @code{_FILE_OFFSET_BITS} and @code{_LARGE_FILES} if necessary. Large-file support can be disabled by configuring with the @option{--disable-largefile} option. If you use this macro, check that your program works even when @code{off_t} is wider than @code{long int}, since this is common when large-file support is enabled. 
For example, it is not correct to print an arbitrary @code{off_t} value @code{X} with @code{printf ("%ld", (long int) X)}. Also, when using this macro in concert with @code{AC_CONFIG_HEADERS}, be sure that @file{config.h} is included before any system header. The LFS introduced the @code{fseeko} and @code{ftello} functions to replace their C counterparts @code{fseek} and @code{ftell} that do not use @code{off_t}. Take care to use @code{AC_FUNC_FSEEKO} to make their prototypes available when using them and large-file support is enabled. @end defmac @anchor{AC_SYS_LONG_FILE_NAMES} @defmac AC_SYS_LONG_FILE_NAMES @acindex{SYS_LONG_FILE_NAMES} @cvindex HAVE_LONG_FILE_NAMES If the system supports file names longer than 14 characters, define @code{HAVE_LONG_FILE_NAMES}. @end defmac @defmac AC_SYS_POSIX_TERMIOS @acindex{SYS_POSIX_TERMIOS} @cindex Posix termios headers @cindex termios Posix headers @caindex sys_posix_termios Check to see if the Posix termios headers and functions are available on the system. If so, set the shell variable @code{ac_cv_sys_posix_termios} to @samp{yes}. If not, set the variable to @samp{no}. @end defmac @node C and Posix Variants @section C and Posix Variants The following macro makes it possible to use C language and library extensions defined by the C standards committee, features of Posix that are extensions to C, and platform extensions not defined by Posix. @anchor{AC_USE_SYSTEM_EXTENSIONS} @defmac AC_USE_SYSTEM_EXTENSIONS @acindex{USE_SYSTEM_EXTENSIONS} If possible, enable extensions to C or Posix on hosts that normally disable the extensions, typically due to standards-conformance namespace issues. This should be called before any macros that run the C compiler. Also, when using this macro in concert with @code{AC_CONFIG_HEADERS}, be sure that @file{config.h} is included before any system header. 
The following preprocessor macros are defined unconditionally: @table @code @item _ALL_SOURCE @cvindex _ALL_SOURCE Enable extensions on AIX 3 and Interix. @item _DARWIN_C_SOURCE @cvindex _DARWIN_C_SOURCE Enable extensions on macOS. @item _GNU_SOURCE @cvindex _GNU_SOURCE Enable extensions on GNU systems. @item _NETBSD_SOURCE @cvindex _NETBSD_SOURCE Enable general extensions on NetBSD. Enable NetBSD compatibility extensions on Minix. @item _OPENBSD_SOURCE @cvindex _OPENBSD_SOURCE Enable OpenBSD compatibility extensions on NetBSD. Oddly enough, this does nothing on OpenBSD. @item _POSIX_PTHREAD_SEMANTICS @cvindex _POSIX_PTHREAD_SEMANTICS Enable Posix-compatible threading on Solaris. @item __STDC_WANT_IEC_60559_ATTRIBS_EXT__ @cvindex __STDC_WANT_IEC_60559_ATTRIBS_EXT__ Enable extensions specified by ISO/IEC TS 18661-5:2014. @item __STDC_WANT_IEC_60559_BFP_EXT__ @cvindex __STDC_WANT_IEC_60559_BFP_EXT__ Enable extensions specified by ISO/IEC TS 18661-1:2014. @item __STDC_WANT_IEC_60559_DFP_EXT__ @cvindex __STDC_WANT_IEC_60559_DFP_EXT__ Enable extensions specified by ISO/IEC TS 18661-2:2015. @item __STDC_WANT_IEC_60559_FUNCS_EXT__ @cvindex __STDC_WANT_IEC_60559_FUNCS_EXT__ Enable extensions specified by ISO/IEC TS 18661-4:2015. @item __STDC_WANT_IEC_60559_TYPES_EXT__ @cvindex __STDC_WANT_IEC_60559_TYPES_EXT__ Enable extensions specified by ISO/IEC TS 18661-3:2015. @item __STDC_WANT_LIB_EXT2__ @cvindex __STDC_WANT_LIB_EXT2__ Enable extensions specified by ISO/IEC TR 24731-2:2010. @item __STDC_WANT_MATH_SPEC_FUNCS__ @cvindex __STDC_WANT_MATH_SPEC_FUNCS__ Enable extensions specified by ISO/IEC 24747:2009. @item _TANDEM_SOURCE @cvindex _TANDEM_SOURCE Enable extensions on HP NonStop systems. @end table The following preprocessor macros are defined only when necessary; they enable access to extensions on some operating systems but @emph{disable} extensions on other operating systems. @table @code @item __EXTENSIONS__ @cvindex __EXTENSIONS__ Enable general extensions on Solaris. 
This macro is defined only if the headers included by @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}) work correctly with it defined. @item _MINIX @itemx _POSIX_SOURCE @itemx _POSIX_1_SOURCE @cvindex _MINIX @cvindex _POSIX_SOURCE @cvindex _POSIX_1_SOURCE Defined only on MINIX. @code{_POSIX_SOURCE} and @code{_POSIX_1_SOURCE} are needed to enable a number of POSIX features on this OS. @code{_MINIX} does not affect the system headers' behavior; future versions of Autoconf may stop defining it. Programs that need to recognize Minix should use @code{AC_CANONICAL_HOST}. @item _XOPEN_SOURCE @cvindex _XOPEN_SOURCE Defined (with value 500) only if needed to make @file{wchar.h} declare @code{mbstate_t}. This is known to be necessary on some versions of HP/UX. @end table @cvindex __STDC_WANT_DEC_FP__ The C preprocessor macro @code{__STDC_WANT_DEC_FP__} is not defined. ISO/IEC TR 24732:2009 was superseded by ISO/IEC TS 18661-2:2015. @cvindex __STDC_WANT_LIB_EXT1__ The C preprocessor macro @code{__STDC_WANT_LIB_EXT1__} is not defined, as C11 Annex K is problematic. See: O'Donell C, Sebor M. @uref{http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1967.htm, Field Experience With Annex K---Bounds Checking Interfaces}. The Autoconf macro @code{AC_USE_SYSTEM_EXTENSIONS} was introduced in Autoconf 2.60. @end defmac @node Erlang Libraries @section Erlang Libraries @cindex Erlang, Library, checking The following macros check for an installation of Erlang/OTP, and for the presence of certain Erlang libraries. All those macros require the configuration of an Erlang interpreter and an Erlang compiler (@pxref{Erlang Compiler and Interpreter}). @defmac AC_ERLANG_SUBST_ERTS_VER @acindex{ERLANG_SUBST_ERTS_VER} @ovindex ERLANG_ERTS_VER Set the output variable @code{ERLANG_ERTS_VER} to the version of the Erlang runtime system (as returned by Erlang's @code{erlang:system_info(version)} function). The result of this test is cached if caching is enabled when running @command{configure}. 
The @code{ERLANG_ERTS_VER} variable is not intended to be used for testing for features of specific ERTS versions, but to be used for substituting the ERTS version in Erlang/OTP release resource files (@code{.rel} files), as shown below. @end defmac @defmac AC_ERLANG_SUBST_ROOT_DIR @acindex{ERLANG_SUBST_ROOT_DIR} @ovindex ERLANG_ROOT_DIR Set the output variable @code{ERLANG_ROOT_DIR} to the path to the base directory in which Erlang/OTP is installed (as returned by Erlang's @code{code:root_dir/0} function). The result of this test is cached if caching is enabled when running @command{configure}. @end defmac @defmac AC_ERLANG_SUBST_LIB_DIR @acindex{ERLANG_SUBST_LIB_DIR} @ovindex ERLANG_LIB_DIR Set the output variable @code{ERLANG_LIB_DIR} to the path of the library directory of Erlang/OTP (as returned by Erlang's @code{code:lib_dir/0} function), which subdirectories each contain an installed Erlang/OTP library. The result of this test is cached if caching is enabled when running @command{configure}. @end defmac @defmac AC_ERLANG_CHECK_LIB (@var{library}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{ERLANG_CHECK_LIB} @ovindex ERLANG_LIB_DIR_@var{library} @ovindex ERLANG_LIB_VER_@var{library} Test whether the Erlang/OTP library @var{library} is installed by calling Erlang's @code{code:lib_dir/1} function. The result of this test is cached if caching is enabled when running @command{configure}. @var{action-if-found} is a list of shell commands to run if the library is installed; @var{action-if-not-found} is a list of shell commands to run if it is not. Additionally, if the library is installed, the output variable @samp{ERLANG_LIB_DIR_@var{library}} is set to the path to the library installation directory, and the output variable @samp{ERLANG_LIB_VER_@var{library}} is set to the version number that is part of the subdirectory name, if it is in the standard form (@code{@var{library}-@var{version}}). 
If the directory name does not have a version part, @samp{ERLANG_LIB_VER_@var{library}} is set to the empty string. If the library is not installed, @samp{ERLANG_LIB_DIR_@var{library}} and @samp{ERLANG_LIB_VER_@var{library}} are set to @code{"not found"}. For example, to check if library @code{stdlib} is installed: @example AC_ERLANG_CHECK_LIB([stdlib], [echo "stdlib version \"$ERLANG_LIB_VER_stdlib\"" echo "is installed in \"$ERLANG_LIB_DIR_stdlib\""], [AC_MSG_ERROR([stdlib was not found!])]) @end example The @samp{ERLANG_LIB_VER_@var{library}} variables (set by @code{AC_ERLANG_CHECK_LIB}) and the @code{ERLANG_ERTS_VER} variable (set by @code{AC_ERLANG_SUBST_ERTS_VER}) are not intended to be used for testing for features of specific versions of libraries or of the Erlang runtime system. Those variables are intended to be substituted in Erlang release resource files (@code{.rel} files). For instance, to generate a @file{example.rel} file for an application depending on the @code{stdlib} library, @file{configure.ac} could contain: @example AC_ERLANG_SUBST_ERTS_VER AC_ERLANG_CHECK_LIB([stdlib], [], [AC_MSG_ERROR([stdlib was not found!])]) AC_CONFIG_FILES([example.rel]) @end example @noindent The @file{example.rel.in} file used to generate @file{example.rel} should contain: @example @{release, @{"@@PACKAGE@@", "@@VERSION@@"@}, @{erts, "@@ERLANG_ERTS_VER@@"@}, [@{stdlib, "@@ERLANG_LIB_VER_stdlib@@"@}, @{@@PACKAGE@@, "@@VERSION@@"@}]@}. @end example @end defmac In addition to the above macros, which test installed Erlang libraries, the following macros determine the paths to the directories into which newly built Erlang libraries are to be installed: @defmac AC_ERLANG_SUBST_INSTALL_LIB_DIR @acindex{ERLANG_SUBST_INSTALL_LIB_DIR} @ovindex ERLANG_INSTALL_LIB_DIR Set the @code{ERLANG_INSTALL_LIB_DIR} output variable to the directory into which every built Erlang library should be installed in a separate subdirectory. 
If this variable is not set in the environment when @command{configure} runs, its default value is @code{$@{libdir@}/erlang/lib}. @end defmac @defmac AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR (@var{library}, @var{version}) @acindex{ERLANG_SUBST_INSTALL_LIB_SUBDIR} @ovindex ERLANG_INSTALL_LIB_DIR_@var{library} Set the @samp{ERLANG_INSTALL_LIB_DIR_@var{library}} output variable to the directory into which the built Erlang library @var{library} version @var{version} should be installed. If this variable is not set in the environment when @command{configure} runs, its default value is @samp{$ERLANG_INSTALL_LIB_DIR/@var{library}-@var{version}}, the value of the @code{ERLANG_INSTALL_LIB_DIR} variable being set by the @code{AC_ERLANG_SUBST_INSTALL_LIB_DIR} macro. @end defmac @c ========================================================= Writing Tests @node Writing Tests @chapter Writing Tests If the existing feature tests don't do something you need, you have to write new ones. These macros are the building blocks. They provide ways for other macros to check whether various kinds of features are available and report the results. This chapter contains some suggestions and some of the reasons why the existing tests are written the way they are. You can also learn a lot about how to write Autoconf tests by looking at the existing ones. If something goes wrong in one or more of the Autoconf tests, this information can help you understand the assumptions behind them, which might help you figure out how to best solve the problem. These macros check the output of the compiler system of the current language (@pxref{Language Choice}). They do not cache the results of their tests for future use (@pxref{Caching Results}), because they don't know enough about the information they are checking for to generate a cache variable name. They also do not print any messages, for the same reason. 
The checks for particular kinds of features call these macros and do cache their results and print messages about what they're checking for. When you write a feature test that could be applicable to more than one software package, the best thing to do is encapsulate it in a new macro. @xref{Writing Autoconf Macros}, for how to do that. @menu * Language Choice:: Selecting which language to use for testing * Writing Test Programs:: Forging source files for compilers * Running the Preprocessor:: Detecting preprocessor symbols * Running the Compiler:: Detecting language or header features * Running the Linker:: Detecting library features * Runtime:: Testing for runtime features * Systemology:: A zoology of operating systems * Multiple Cases:: Tests for several possible values @end menu @node Language Choice @section Language Choice @cindex Language Autoconf-generated @command{configure} scripts check for the C compiler and its features by default. Packages that use other programming languages (maybe more than one, e.g., C and C++) need to test features of the compilers for the respective languages. The following macros determine which programming language is used in the subsequent tests in @file{configure.ac}. @anchor{AC_LANG} @defmac AC_LANG (@var{language}) @acindex{LANG} Do compilation tests using the compiler, preprocessor, and file extensions for the specified @var{language}. Supported languages are: @table @samp @item C Do compilation tests using @code{CC} and @code{CPP} and use extension @file{.c} for test programs. Use compilation flags: @code{CPPFLAGS} with @code{CPP}, and both @code{CPPFLAGS} and @code{CFLAGS} with @code{CC}. @item C++ Do compilation tests using @code{CXX} and @code{CXXCPP} and use extension @file{.C} for test programs. Use compilation flags: @code{CPPFLAGS} with @code{CXXCPP}, and both @code{CPPFLAGS} and @code{CXXFLAGS} with @code{CXX}. @item Fortran 77 Do compilation tests using @code{F77} and use extension @file{.f} for test programs. 
Use compilation flags: @code{FFLAGS}. @item Fortran Do compilation tests using @code{FC} and use extension @file{.f} (or whatever has been set by @code{AC_FC_SRCEXT}) for test programs. Use compilation flags: @code{FCFLAGS}. @item Erlang @ovindex ERLC @ovindex ERL @ovindex ERLCFLAGS Compile and execute tests using @code{ERLC} and @code{ERL} and use extension @file{.erl} for test Erlang modules. Use compilation flags: @code{ERLCFLAGS}. @item Objective C Do compilation tests using @code{OBJC} and @code{OBJCPP} and use extension @file{.m} for test programs. Use compilation flags: @code{CPPFLAGS} with @code{OBJCPP}, and both @code{CPPFLAGS} and @code{OBJCFLAGS} with @code{OBJC}. @item Objective C++ Do compilation tests using @code{OBJCXX} and @code{OBJCXXCPP} and use extension @file{.mm} for test programs. Use compilation flags: @code{CPPFLAGS} with @code{OBJCXXCPP}, and both @code{CPPFLAGS} and @code{OBJCXXFLAGS} with @code{OBJCXX}. @item Go Do compilation tests using @code{GOC} and use extension @file{.go} for test programs. Use compilation flags @code{GOFLAGS}. @end table @end defmac @anchor{AC_LANG_PUSH} @defmac AC_LANG_PUSH (@var{language}) @acindex{LANG_PUSH} Remember the current language (as set by @code{AC_LANG}) on a stack, and then select the @var{language}. Use this macro and @code{AC_LANG_POP} in macros that need to temporarily switch to a particular language. @end defmac @defmac AC_LANG_POP (@ovar{language}) @acindex{LANG_POP} Select the language that is saved on the top of the stack, as set by @code{AC_LANG_PUSH}, and remove it from the stack. If given, @var{language} specifies the language we just @emph{quit}. It is a good idea to specify it when it's known (which should be the case@dots{}), since Autoconf detects inconsistencies. @example AC_LANG_PUSH([Fortran 77]) # Perform some tests on Fortran 77. 
# @dots{} AC_LANG_POP([Fortran 77]) @end example @end defmac @defmac AC_LANG_ASSERT (@var{language}) @acindex{LANG_ASSERT} Check statically that the current language is @var{language}. You should use this in your language specific macros to avoid that they be called with an inappropriate language. This macro runs only at @command{autoconf} time, and incurs no cost at @command{configure} time. Sadly enough and because Autoconf is a two layer language @footnote{Because M4 is not aware of Sh code, especially conditionals, some optimizations that look nice statically may produce incorrect results at runtime.}, the macros @code{AC_LANG_PUSH} and @code{AC_LANG_POP} cannot be ``optimizing'', therefore as much as possible you ought to avoid using them to wrap your code, rather, require from the user to run the macro with a correct current language, and check it with @code{AC_LANG_ASSERT}. And anyway, that may help the user understand she is running a Fortran macro while expecting a result about her Fortran 77 compiler@enddots{} @end defmac @defmac AC_REQUIRE_CPP @acindex{REQUIRE_CPP} Ensure that whichever preprocessor would currently be used for tests has been found. Calls @code{AC_REQUIRE} (@pxref{Prerequisite Macros}) with an argument of either @code{AC_PROG_CPP} or @code{AC_PROG_CXXCPP}, depending on which language is current. @end defmac @node Writing Test Programs @section Writing Test Programs Autoconf tests follow a common scheme: feed some program with some input, and most of the time, feed a compiler with some source file. This section is dedicated to these source samples. 
@menu * Guidelines:: General rules for writing test programs * Test Functions:: Avoiding pitfalls in test programs * Generating Sources:: Source program boilerplate @end menu @node Guidelines @subsection Guidelines for Test Programs The most important rule to follow when writing testing samples is: @center @emph{Look for realism.} This motto means that testing samples must be written with the same strictness as real programs are written. In particular, you should avoid ``shortcuts'' and simplifications. Don't just play with the preprocessor if you want to prepare a compilation. For instance, using @command{cpp} to check whether a header is functional might let your @command{configure} accept a header which causes some @emph{compiler} error. Do not hesitate to check a header with other headers included before, especially required headers. Make sure the symbols you use are properly defined, i.e., refrain from simply declaring a function yourself instead of including the proper header. Test programs should not write to standard output. They should exit with status 0 if the test succeeds, and with status 1 otherwise, so that success can be distinguished easily from a core dump or other failure; segmentation violations and other failures produce a nonzero exit status. Unless you arrange for @code{exit} to be declared, test programs should @code{return}, not @code{exit}, from @code{main}, because on many systems @code{exit} is not declared by default. Test programs can use @code{#if} or @code{#ifdef} to check the values of preprocessor macros defined by tests that have already run. For example, if you call @code{AC_HEADER_STDBOOL}, then later on in @file{configure.ac} you can have a test program that includes @file{stdbool.h} conditionally: @example @group #ifdef HAVE_STDBOOL_H # include <stdbool.h> #endif @end group @end example Both @code{#if HAVE_STDBOOL_H} and @code{#ifdef HAVE_STDBOOL_H} will work with any standard C compiler. 
Some developers prefer @code{#if} because it is easier to read, while others prefer @code{#ifdef} because it avoids diagnostics with picky compilers like GCC with the @option{-Wundef} option. If a test program needs to use or create a data file, give it a name that starts with @file{conftest}, such as @file{conftest.data}. The @command{configure} script cleans up by running @samp{rm -f -r conftest*} after running test programs and if the script is interrupted. @node Test Functions @subsection Test Functions These days it's safe to assume support for function prototypes (introduced in C89). Functions that test programs declare should also be conditionalized for C++, which requires @samp{extern "C"} prototypes. Make sure to not include any header files containing clashing prototypes. @example #ifdef __cplusplus extern "C" #endif void *valloc (size_t); @end example If a test program calls a function with invalid parameters (just to see whether it exists), organize the program to ensure that it never invokes that function. You can do this by calling it in another function that is never invoked. You can't do it by putting it after a call to @code{exit}, because GCC version 2 knows that @code{exit} never returns and optimizes out any code that follows it in the same block. If you include any header files, be sure to call the functions relevant to them with the correct number of arguments, even if they are just 0, to avoid compilation errors due to prototypes. GCC version 2 has internal prototypes for several functions that it automatically inlines; for example, @code{memcpy}. To avoid errors when checking for them, either pass them the correct number of arguments or redeclare them with a different return type (such as @code{char}). @node Generating Sources @subsection Generating Sources Autoconf provides a set of macros that can be used to generate test source files. 
They are written to be language generic, i.e., they actually depend on the current language (@pxref{Language Choice}) to ``format'' the output properly. @defmac AC_LANG_CONFTEST (@var{source}) @acindex{LANG_CONFTEST} Save the @var{source} text in the current test source file: @file{conftest.@var{extension}} where the @var{extension} depends on the current language. As of Autoconf 2.63b, the source file also contains the results of all of the @code{AC_DEFINE} performed so far. Note that the @var{source} is evaluated exactly once, like regular Autoconf macro arguments, and therefore (i) you may pass a macro invocation, (ii) if not, be sure to double quote if needed. This macro issues a warning during @command{autoconf} processing if @var{source} does not include an expansion of the macro @code{AC_LANG_DEFINES_PROVIDED} (note that both @code{AC_LANG_SOURCE} and @code{AC_LANG_PROGRAM} call this macro, and thus avoid the warning). This macro is seldom called directly, but is used under the hood by more common macros such as @code{AC_COMPILE_IFELSE} and @code{AC_RUN_IFELSE}. @end defmac @defmac AC_LANG_DEFINES_PROVIDED @acindex{LANG_DEFINES_PROVIDED} This macro is called as a witness that the file @file{conftest.@var{extension}} appropriate for the current language is complete, including all previously determined results from @code{AC_DEFINE}. This macro is seldom called directly, but exists if you have a compelling reason to write a conftest file without using @code{AC_LANG_SOURCE}, yet still want to avoid a syntax warning from @code{AC_LANG_CONFTEST}. @end defmac @defmac AC_LANG_SOURCE (@var{source}) @acindex{LANG_SOURCE} Expands into the @var{source}, with the definition of all the @code{AC_DEFINE} performed so far. This macro includes an expansion of @code{AC_LANG_DEFINES_PROVIDED}. In many cases, you may find it more convenient to use the wrapper @code{AC_LANG_PROGRAM}. 
@end defmac For instance, executing (observe the double quotation!): @example @c If you change this example, adjust tests/compile.at:AC_LANG_SOURCE example. AC_INIT([Hello], [1.0], [bug-hello@@example.org], [], [https://www.example.org/]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_LANG([C]) AC_LANG_CONFTEST( [AC_LANG_SOURCE([[const char hw[] = "Hello, World\n";]])]) gcc -E -dD conftest.c @end example @noindent on a system with @command{gcc} installed, results in: @example @c If you change this example, adjust tests/compile.at:AC_LANG_SOURCE example. @dots{} @asis{#} 1 "conftest.c" #define PACKAGE_NAME "Hello" #define PACKAGE_TARNAME "hello" #define PACKAGE_VERSION "1.0" #define PACKAGE_STRING "Hello 1.0" #define PACKAGE_BUGREPORT "bug-hello@@example.org" #define PACKAGE_URL "https://www.example.org/" #define HELLO_WORLD "Hello, World\n" const char hw[] = "Hello, World\n"; @end example When the test language is Fortran, Erlang, or Go, the @code{AC_DEFINE} definitions are not automatically translated into constants in the source code by this macro. @defmac AC_LANG_PROGRAM (@var{prologue}, @var{body}) @acindex{LANG_PROGRAM} Expands into a source file which consists of the @var{prologue}, and then @var{body} as body of the main function (e.g., @code{main} in C). Since it uses @code{AC_LANG_SOURCE}, the features of the latter are available. @end defmac For instance: @example @c If you change this example, adjust tests/compile.at:AC_LANG_PROGRAM example. AC_INIT([Hello], [1.0], [bug-hello@@example.org], [], [https://www.example.org/]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_LANG_CONFTEST( [AC_LANG_PROGRAM([[const char hw[] = "Hello, World\n";]], [[fputs (hw, stdout);]])]) gcc -E -dD conftest.c @end example @noindent on a system with @command{gcc} installed, results in: @example @c If you change this example, adjust tests/compile.at:AC_LANG_PROGRAM example. 
Expands into a source file which consists of the @var{prologue}, and then a call to the @var{function} as body of the main function (e.g., @code{main} in C). Since it uses @code{AC_LANG_PROGRAM}, the features of the latter are available. This function will probably be replaced in the future by a version which would enable specifying the arguments. The use of this macro is not encouraged, as it strongly violates the typing system.
Like @code{AC_LANG_CALL}, this macro is documented only for completeness.
@end defmac For instance: @example AC_INIT([Hello], [1.0], [bug-hello@@example.org]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_PREPROC_IFELSE( [AC_LANG_PROGRAM([[const char hw[] = "Hello, World\n";]], [[fputs (hw, stdout);]])], [AC_MSG_RESULT([OK])], [AC_MSG_FAILURE([unexpected preprocessor failure])]) @end example @noindent might result in: @example checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking how to run the C preprocessor... gcc -std=gnu11 -E OK @end example @sp 1 The macro @code{AC_TRY_CPP} (@pxref{Obsolete Macros}) used to play the role of @code{AC_PREPROC_IFELSE}, but double quotes its argument, making it impossible to use it to elaborate sources. You are encouraged to get rid of your old use of the macro @code{AC_TRY_CPP} in favor of @code{AC_PREPROC_IFELSE}, but, in the first place, are you sure you need to run the @emph{preprocessor} and not the compiler? @anchor{AC_EGREP_HEADER} @defmac AC_EGREP_HEADER (@var{pattern}, @var{header-file}, @ @var{action-if-found}, @ovar{action-if-not-found}) @acindex{EGREP_HEADER} If the output of running the preprocessor on the system header file @var{header-file} matches the extended regular expression @var{pattern}, execute shell commands @var{action-if-found}, otherwise execute @var{action-if-not-found}. See below for some problems involving this macro. 
Because preprocessors are allowed to insert white space, change escapes in string constants, insert backslash-newline pairs, or do any of a number of things that do not change the meaning of the preprocessed program, it is better to rely on @code{AC_PREPROC_IFELSE} than to resort to @code{AC_EGREP_CPP} or @code{AC_EGREP_HEADER}.
This macro does not try to link; use @code{AC_LINK_IFELSE} if you need to do that (@pxref{Running the Linker}). If needed, @var{action-if-true} can further access the just-compiled object file @file{conftest.$OBJEXT}. This macro uses @code{AC_REQUIRE} for the compiler associated with the current language, which means that if the compiler has not yet been determined, the compiler determination will be made prior to the body of the outermost @code{AC_DEFUN} macro that triggered this macro to expand (@pxref{Expanded Before Required}). @end defmac @ovindex ERL For tests in Erlang, the @var{input} must be the source code of a module named @code{conftest}. @code{AC_COMPILE_IFELSE} generates a @file{conftest.beam} file that can be interpreted by the Erlang virtual machine (@code{ERL}). It is recommended to use @code{AC_LANG_PROGRAM} to specify the test program, to ensure that the Erlang module has the right name. @node Running the Linker @section Running the Linker To check for a library, a function, or a global variable, Autoconf @command{configure} scripts try to compile and link a small program that uses it. This is unlike Metaconfig, which by default uses @code{nm} or @code{ar} on the C library to try to figure out which functions are available. Trying to link with the function is usually a more reliable approach because it avoids dealing with the variations in the options and output formats of @code{nm} and @code{ar} and in the location of the standard libraries. It also allows configuring for cross-compilation or checking a function's runtime behavior if needed. On the other hand, it can be slower than scanning the libraries once, but accuracy is more important than speed. @code{AC_LINK_IFELSE} is used to compile test programs to test for functions and global variables. It is also used by @code{AC_CHECK_LIB} to check for libraries (@pxref{Libraries}), by adding the library being checked for to @code{LIBS} temporarily and trying to link a small program. 
@anchor{AC_LINK_IFELSE} @defmac AC_LINK_IFELSE (@var{input}, @ovar{action-if-true}, @ @ovar{action-if-false}) @acindex{LINK_IFELSE} Run the compiler (and compilation flags) and the linker of the current language (@pxref{Language Choice}) on the @var{input}, run the shell commands @var{action-if-true} on success, @var{action-if-false} otherwise. The @var{input} can be made by @code{AC_LANG_PROGRAM} and friends. If needed, @var{action-if-true} can further access the just-linked program file @file{conftest$EXEEXT}. @code{LDFLAGS} and @code{LIBS} are used for linking, in addition to the current compilation flags. It is customary to report unexpected failures with @code{AC_MSG_FAILURE}. This macro does not try to execute the program; use @code{AC_RUN_IFELSE} if you need to do that (@pxref{Runtime}). @end defmac The @code{AC_LINK_IFELSE} macro cannot be used for Erlang tests, since Erlang programs are interpreted and do not require linking. @node Runtime @section Checking Runtime Behavior Sometimes you need to find out how a system performs at runtime, such as whether a given function has a certain capability or bug. If you can, make such checks when your program runs instead of when it is configured. You can check for things like the machine's endianness when your program initializes itself. If you really need to test for a runtime behavior while configuring, you can write a test program to determine the result, and compile and run it using @code{AC_RUN_IFELSE}. Avoid running test programs if possible, because this prevents people from configuring your package for cross-compiling. @anchor{AC_RUN_IFELSE} @defmac AC_RUN_IFELSE (@var{input}, @ovar{action-if-true}, @ @ovar{action-if-false}, @dvar{action-if-cross-compiling, AC_MSG_FAILURE}) @acindex{RUN_IFELSE} Run the compiler (and compilation flags) and the linker of the current language (@pxref{Language Choice}) on the @var{input}, then execute the resulting program. 
If the program returns an exit status of 0 when executed, run shell commands @var{action-if-true}. Otherwise, run shell commands @var{action-if-false}. The @var{input} can be made by @code{AC_LANG_PROGRAM} and friends. @code{LDFLAGS} and @code{LIBS} are used for linking, in addition to the compilation flags of the current language (@pxref{Language Choice}). Additionally, @var{action-if-true} can run @command{./conftest$EXEEXT} for further testing. In the @var{action-if-false} section, the failing exit status is available in the shell variable @samp{$?}. This exit status might be that of a failed compilation, or it might be that of a failed program execution. If cross-compilation mode is enabled (this is the case if either the compiler being used does not produce executables that run on the system where @command{configure} is being run, or if the options @code{--build} and @code{--host} were both specified and their values are different), then the test program is not run. If the optional shell commands @var{action-if-cross-compiling} are given, those commands are run instead; typically these commands provide pessimistic defaults that allow cross-compilation to work even if the guess was wrong. If the fourth argument is empty or omitted, but cross-compilation is detected, then @command{configure} prints an error message and exits. If you want your package to be useful in a cross-compilation scenario, you @emph{should} provide a non-empty @var{action-if-cross-compiling} clause, as well as wrap the @code{AC_RUN_IFELSE} compilation inside an @code{AC_CACHE_CHECK} (@pxref{Caching Results}) which allows the user to override the pessimistic default if needed. It is customary to report unexpected failures with @code{AC_MSG_FAILURE}. @end defmac @command{autoconf} prints a warning message when creating @command{configure} each time it encounters a call to @code{AC_RUN_IFELSE} with no @var{action-if-cross-compiling} argument given. 
Erlang tests must themselves exit the Erlang VM by calling the @code{halt/1} function: the given status code is used to determine the success of the test (status is @code{0}) or its failure (status is different from @code{0}), as explained above.
This can cause nasty problems, since for instance the installation attempt for a package having an @file{INSTALL} file can result in @samp{make install} reporting that nothing was to be done!
By default Apple wants you to install the OS on HFS+. Unfortunately, there are some pieces of software which really need to be built on UFS@. We may want to rebuild Darwin to have both UFS and HFS+ available (and put the /local/build tree on the UFS). @item QNX 4.25 @cindex QNX 4.25 @c FIXME: Please, if you feel like writing something more precise, @c it'd be great. In particular, I can't understand the difference with @c QNX Neutrino. QNX is a realtime operating system running on Intel architecture meant to be scalable from the small embedded systems to the hundred processor super-computer. It claims to be Posix certified. More information is available on the @uref{https://@/blackberry.qnx.com/@/en, QNX home page}. @item Unix version 7 @cindex Unix version 7 @cindex V7 Officially this was called the ``Seventh Edition'' of ``the UNIX time-sharing system'' but we use the more-common name ``Unix version 7''. Documentation is available in the @uref{https://@/s3.amazonaws.com/@/plan9-bell-labs/@/7thEdMan/@/index.html, Unix Seventh Edition Manual}. Previous versions of Unix are called ``Unix version 6'', etc., but they were not as widely used. @end table @node Multiple Cases @section Multiple Cases Some operations are accomplished in several possible ways, depending on the OS variant. Checking for them essentially requires a ``case statement''. Autoconf does not directly provide one; however, it is easy to simulate by using a shell variable to keep track of whether a way to perform the operation has been found yet. Here is an example that uses the shell variable @code{fstype} to keep track of whether the remaining cases need to be checked. Note that since the value of @code{fstype} is under our control, we don't have to use the longer @samp{test "x$fstype" = xno}. @example @group AC_MSG_CHECKING([how to get file system type]) fstype=no # The order of these tests is important. 
if test $fstype = no; then AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/statfs.h> #include <sys/vmount.h>]])], [AC_DEFINE([FSTYPE_AIX_STATFS], [1], [Define if AIX statfs.]) fstype=AIX]) fi
To check whether Autoconf macros have already defined a certain C preprocessor symbol, test the value of the appropriate cache variable, as in this example: @example AC_CHECK_FUNC([vprintf], [AC_DEFINE([HAVE_VPRINTF], [1], [Define if vprintf exists.])]) if test "x$ac_cv_func_vprintf" != xyes; then AC_CHECK_FUNC([_doprnt], [AC_DEFINE([HAVE_DOPRNT], [1], [Define if _doprnt exists.])]) fi @end example If @code{AC_CONFIG_HEADERS} has been called, then instead of creating @code{DEFS}, @code{AC_OUTPUT} creates a header file by substituting the correct values into @code{#define} statements in a template file. @xref{Configuration Headers}, for more information about this kind of output. @defmac AC_DEFINE (@var{variable}, @var{value}, @ovar{description}) @defmacx AC_DEFINE (@var{variable}) @cvindex @var{variable} @acindex{DEFINE} Define @var{variable} to @var{value} (verbatim), by defining a C preprocessor macro for @var{variable}. @var{variable} should be a C identifier, optionally suffixed by a parenthesized argument list to define a C preprocessor macro with arguments. The macro argument list, if present, should be a comma-separated list of C identifiers, possibly terminated by an ellipsis @samp{...} if C99-or-later syntax is employed. @var{variable} should not contain comments, white space, trigraphs, backslash-newlines, universal character names, or non-ASCII characters. @var{value} may contain backslash-escaped newlines, which will be preserved if you use @code{AC_CONFIG_HEADERS} but flattened if passed via @code{@@DEFS@@} (with no effect on the compilation, since the preprocessor sees only one line in the first place). @var{value} should not contain raw newlines. If you are not using @code{AC_CONFIG_HEADERS}, @var{value} should not contain any @samp{#} characters, as @command{make} tends to eat them. To use a shell variable, use @code{AC_DEFINE_UNQUOTED} instead. @var{description} is only useful if you are using @code{AC_CONFIG_HEADERS}. 
In this case, @var{description} is put into the generated @file{config.h.in} as the comment before the macro define. The following example defines the C preprocessor variable @code{EQUATION} to be the string constant @samp{"$a > $b"}: @example AC_DEFINE([EQUATION], ["$a > $b"], [Equation string.]) @end example If neither @var{value} nor @var{description} are given, then @var{value} defaults to 1 instead of to the empty string. This is for backwards compatibility with older versions of Autoconf, but this usage is obsolescent and may be withdrawn in future versions of Autoconf. If the @var{variable} is a literal string, it is passed to @code{m4_pattern_allow} (@pxref{Forbidden Patterns}). If multiple @code{AC_DEFINE} statements are executed for the same @var{variable} name (not counting any parenthesized argument list), the last one wins. @end defmac @defmac AC_DEFINE_UNQUOTED (@var{variable}, @var{value}, @ovar{description}) @defmacx AC_DEFINE_UNQUOTED (@var{variable}) @acindex{DEFINE_UNQUOTED} @cvindex @var{variable} Like @code{AC_DEFINE}, but three shell expansions are performed---once---on @var{variable} and @var{value}: variable expansion (@samp{$}), command substitution (@samp{`}), and backslash escaping (@samp{\}), as if in an unquoted here-document. Single and double quote characters in the value have no special meaning. Use this macro instead of @code{AC_DEFINE} when @var{variable} or @var{value} is a shell variable. Examples: @example AC_DEFINE_UNQUOTED([config_machfile], ["$machfile"], [Configuration machine file.]) AC_DEFINE_UNQUOTED([GETGROUPS_T], [$ac_cv_type_getgroups], [getgroups return type.]) AC_DEFINE_UNQUOTED([$ac_tr_hdr], [1], [Translated header name.]) @end example @end defmac Due to a syntactical oddity of the Bourne shell, do not use semicolons to separate @code{AC_DEFINE} or @code{AC_DEFINE_UNQUOTED} calls from other macro calls or shell code; that can cause syntax errors in the resulting @command{configure} script. 
Use either blanks or newlines. That is, do this: @example AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]) LIBS="-lelf $LIBS"]) @end example @noindent or this: @example AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]) LIBS="-lelf $LIBS"]) @end example @noindent instead of this: @example AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]); LIBS="-lelf $LIBS"]) @end example @node Setting Output Variables @section Setting Output Variables @cindex Output variables Another way to record the results of tests is to set @dfn{output variables}, which are shell variables whose values are substituted into files that @command{configure} outputs. The two macros below create new output variables. @xref{Preset Output Variables}, for a list of output variables that are always available. @defmac AC_SUBST (@var{variable}, @ovar{value}) @acindex{SUBST} Create an output variable from a shell variable. Make @code{AC_OUTPUT} substitute the variable @var{variable} into output files (typically one or more makefiles). This means that @code{AC_OUTPUT} replaces instances of @samp{@@@var{variable}@@} in input files with the value that the shell variable @var{variable} has when @code{AC_OUTPUT} is called. The value can contain any non-@code{NUL} character, including newline. If you are using Automake 1.11 or newer, for newlines in values you might want to consider using @code{AM_SUBST_NOTMAKE} to prevent @command{automake} from adding a line @code{@var{variable} = @@@var{variable}@@} to the @file{Makefile.in} files (@pxref{Optional, , Automake, automake, Other things Automake recognizes}). Variable occurrences should not overlap: e.g., an input file should not contain @samp{@@@var{var1}@@@var{var2}@@} if @var{var1} and @var{var2} are variable names. The substituted value is not rescanned for more output variables; occurrences of @samp{@@@var{variable}@@} in the value are inserted literally into the output file. 
(The algorithm uses the special marker @code{|#_!!_#|} internally, so neither the substituted value nor the output file may contain @code{|#_!!_#|}.) If @var{value} is given, in addition assign it to @var{variable}. The string @var{variable} is passed to @code{m4_pattern_allow} (@pxref{Forbidden Patterns}). @var{variable} is not further expanded, even if there is another macro by the same name. @end defmac @defmac AC_SUBST_FILE (@var{variable}) @acindex{SUBST_FILE} Another way to create an output variable from a shell variable. Make @code{AC_OUTPUT} insert (without substitutions) the contents of the file named by shell variable @var{variable} into output files. This means that @code{AC_OUTPUT} replaces instances of @samp{@@@var{variable}@@} in output files (such as @file{Makefile.in}) with the contents of the file that the shell variable @var{variable} names when @code{AC_OUTPUT} is called. Set the variable to @file{/dev/null} for cases that do not have a file to insert. This substitution occurs only when the @samp{@@@var{variable}@@} is on a line by itself, optionally surrounded by spaces and tabs. The substitution replaces the whole line, including the spaces, tabs, and the terminating newline. This macro is useful for inserting makefile fragments containing special dependencies or other @command{make} directives for particular host or target types into makefiles. For example, @file{configure.ac} could contain: @example AC_SUBST_FILE([host_frag]) host_frag=$srcdir/conf/sun4.mh @end example @noindent and then a @file{Makefile.in} could contain: @example @@host_frag@@ @end example The string @var{variable} is passed to @code{m4_pattern_allow} (@pxref{Forbidden Patterns}). @end defmac @cindex Precious Variable @cindex Variable, Precious Running @command{configure} in varying environments can be extremely dangerous. 
If for some reason the user runs @command{./configure} again, or if it is run via @samp{./config.status --recheck} (@pxref{Automatic Remaking}, and @pxref{config.status Invocation}), then the configuration can be inconsistent, composed of results depending upon two different compilers.
For instance: @example $ @kbd{./configure --silent --config-cache} $ @kbd{CC=cc ./configure --silent --config-cache} configure: error: 'CC' was not set in the previous run configure: error: changes in the environment can compromise \ the build configure: error: run 'make distclean' and/or \ 'rm config.cache' and start over @end example @noindent and similarly if the variable is unset, or if its content is changed. If the content has white space changes only, then the error is degraded to a warning only, but the old value is reused. @item @var{variable} is kept during automatic reconfiguration (@pxref{config.status Invocation}) as if it had been passed as a command line argument, including when no cache is used: @example $ @kbd{CC=/usr/bin/cc ./configure var=raboof --silent} $ @kbd{./config.status --recheck} running CONFIG_SHELL=/bin/sh /bin/sh ./configure var=raboof \ CC=/usr/bin/cc --no-create --no-recursion @end example @end itemize @end defmac @node Special Chars in Variables @section Special Characters in Output Variables @cindex Output variables, special characters in Many output variables are intended to be evaluated both by @command{make} and by the shell. Some characters are expanded differently in these two contexts, so to avoid confusion these variables' values should not contain any of the following characters: @example " # $ & ' ( ) * ; < > ? [ \ ^ ` | @end example Also, these variables' values should neither contain newlines, nor start with @samp{~}, nor contain white space or @samp{:} immediately followed by @samp{~}. The values can contain nonempty sequences of white space characters like tabs and spaces, but each such sequence might arbitrarily be replaced by a single space during substitution. These restrictions apply both to the values that @command{configure} computes, and to the values set directly by the user. 
For example, the following invocations of @command{configure} are problematic, since they attempt to use special characters within @code{CPPFLAGS} and white space within @code{$(srcdir)}: @example CPPFLAGS='-DOUCH="&\"#$*?"' '../My Source/ouch-1.0/configure' '../My Source/ouch-1.0/configure' CPPFLAGS='-DOUCH="&\"#$*?"' @end example @node Caching Results @section Caching Results @cindex Cache To avoid checking for the same features repeatedly in various @command{configure} scripts (or in repeated runs of one script), @command{configure} can optionally save the results of many checks in a @dfn{cache file} (@pxref{Cache Files}). If a @command{configure} script runs with caching enabled and finds a cache file, it reads the results of previous runs from the cache and avoids rerunning those checks. As a result, @command{configure} can then run much faster than if it had to perform all of the checks every time. @defmac AC_CACHE_VAL (@var{cache-id}, @var{commands-to-set-it}) @acindex{CACHE_VAL} Ensure that the results of the check identified by @var{cache-id} are available. If the results of the check were in the cache file that was read, and @command{configure} was not given the @option{--quiet} or @option{--silent} option, print a message saying that the result was cached; otherwise, run the shell commands @var{commands-to-set-it}. If the shell commands are run to determine the value, the value is saved in the cache file just before @command{configure} creates its output files. @xref{Cache Variable Names}, for how to choose the name of the @var{cache-id} variable. The @var{commands-to-set-it} @emph{must have no side effects} except for setting the variable @var{cache-id}, see below. @end defmac @defmac AC_CACHE_CHECK (@var{message}, @var{cache-id}, @ @var{commands-to-set-it}) @acindex{CACHE_CHECK} A wrapper for @code{AC_CACHE_VAL} that takes care of printing the messages. This macro provides a convenient shorthand for the most common way to use these macros. 
It calls @code{AC_MSG_CHECKING} for @var{message}, then @code{AC_CACHE_VAL} with the @var{cache-id} and @var{commands} arguments, and @code{AC_MSG_RESULT} with @var{cache-id}. The @var{commands-to-set-it} @emph{must have no side effects} except for setting the variable @var{cache-id}, see below. @end defmac It is common to find buggy macros using @code{AC_CACHE_VAL} or @code{AC_CACHE_CHECK}, because people are tempted to call @code{AC_DEFINE} in the @var{commands-to-set-it}. Instead, the code that @emph{follows} the call to @code{AC_CACHE_VAL} should call @code{AC_DEFINE}, by examining the value of the cache variable. For instance, the following macro is broken: @example @c If you change this example, adjust tests/base.at:AC_CACHE_CHECK. @group AC_DEFUN([AC_SHELL_TRUE], [AC_CACHE_CHECK([whether true(1) works], [my_cv_shell_true_works], [my_cv_shell_true_works=no (true) 2>/dev/null && my_cv_shell_true_works=yes if test "x$my_cv_shell_true_works" = xyes; then AC_DEFINE([TRUE_WORKS], [1], [Define if 'true(1)' works properly.]) fi]) ]) @end group @end example @noindent This fails if the cache is enabled: the second time this macro is run, @code{TRUE_WORKS} @emph{will not be defined}. The proper implementation is: @example @c If you change this example, adjust tests/base.at:AC_CACHE_CHECK. @group AC_DEFUN([AC_SHELL_TRUE], [AC_CACHE_CHECK([whether true(1) works], [my_cv_shell_true_works], [my_cv_shell_true_works=no (true) 2>/dev/null && my_cv_shell_true_works=yes]) if test "x$my_cv_shell_true_works" = xyes; then AC_DEFINE([TRUE_WORKS], [1], [Define if 'true(1)' works properly.]) fi ]) @end group @end example Also, @var{commands-to-set-it} should not print any messages, for example with @code{AC_MSG_CHECKING}; do that before calling @code{AC_CACHE_VAL}, so the messages are printed regardless of whether the results of the check are retrieved from the cache or determined by running the shell commands. 
@menu * Cache Variable Names:: Shell variables used in caches * Cache Files:: Files @command{configure} uses for caching * Cache Checkpointing:: Loading and saving the cache file @end menu @node Cache Variable Names @subsection Cache Variable Names @cindex Cache variable The names of cache variables should have the following format: @example @var{package-prefix}_cv_@var{value-type}_@var{specific-value}_@ovar{additional-options} @end example @noindent for example, @samp{ac_cv_header_stat_broken} or @samp{ac_cv_prog_gcc_traditional}. The parts of the variable name are: @table @asis @item @var{package-prefix} An abbreviation for your package or organization; the same prefix you begin local Autoconf macros with, except lowercase by convention. For cache values used by the distributed Autoconf macros, this value is @samp{ac}. @item @code{_cv_} Indicates that this shell variable is a cache value. This string @emph{must} be present in the variable name, including the leading underscore. @item @var{value-type} A convention for classifying cache values, to produce a rational naming system. The values used in Autoconf are listed in @ref{Macro Names}. @item @var{specific-value} Which member of the class of cache values this test applies to. For example, which function (@samp{alloca}), program (@samp{gcc}), or output variable (@samp{INSTALL}). @item @var{additional-options} Any particular behavior of the specific member that this test applies to. For example, @samp{broken} or @samp{set}. This part of the name may be omitted if it does not apply. @end table The values assigned to cache variables may not contain newlines. Usually, their values are Boolean (@samp{yes} or @samp{no}) or the names of files or functions; so this is not an important restriction. @xref{Cache Variable Index}, for an index of cache variables with documented semantics. 
@node Cache Files @subsection Cache Files A cache file is a shell script that caches the results of configure tests run on one system so they can be shared between configure scripts and configure runs. It is not useful on other systems. If its contents are invalid for some reason, the user may delete or edit it, or override documented cache variables on the @command{configure} command line. By default, @command{configure} uses no cache file, to avoid problems caused by accidental use of stale cache files. To enable caching, @command{configure} accepts @option{--config-cache} (or @option{-C}) to cache results in the file @file{config.cache}. Alternatively, @option{--cache-file=@var{file}} specifies that @var{file} be the cache file. The cache file is created if it does not exist already. When @command{configure} calls @command{configure} scripts in subdirectories, it uses the @option{--cache-file} argument so that they share the same cache. @xref{Subdirectories}, for information on configuring subdirectories with the @code{AC_CONFIG_SUBDIRS} macro. @file{config.status} only pays attention to the cache file if it is given the @option{--recheck} option, which makes it rerun @command{configure}. It is wrong to try to distribute cache files for particular system types. There is too much room for error in doing that, and too much administrative overhead in maintaining them. For any features that can't be guessed automatically, use the standard method of the canonical system type and linking files (@pxref{Manual Configuration}). The site initialization script can specify a site-wide cache file to use, instead of the usual per-program cache. In this case, the cache file gradually accumulates information whenever someone runs a new @command{configure} script. (Running @command{configure} merges the new cache results with the existing cache file.) 
This may cause problems, however, if the system configuration (e.g., the installed libraries or compilers) changes and the stale cache file is not deleted. If @command{configure} is interrupted at the right time when it updates a cache file outside of the build directory where the @command{configure} script is run, it may leave behind a temporary file named after the cache file with digits following it. You may safely delete such a file. @node Cache Checkpointing @subsection Cache Checkpointing If your configure script, or a macro called from @file{configure.ac}, happens to abort the configure process, it may be useful to checkpoint the cache a few times at key points using @code{AC_CACHE_SAVE}. Doing so reduces the amount of time it takes to rerun the configure script with (hopefully) the error that caused the previous abort corrected. @c FIXME: Do we really want to document this guy? @defmac AC_CACHE_LOAD @acindex{CACHE_LOAD} Loads values from existing cache file, or creates a new cache file if a cache file is not found. Called automatically from @code{AC_INIT}. @end defmac @defmac AC_CACHE_SAVE @acindex{CACHE_SAVE} Flushes all cached values to the cache file. Called automatically from @code{AC_OUTPUT}, but it can be quite useful to call @code{AC_CACHE_SAVE} at key points in @file{configure.ac}. @end defmac For instance: @example @r{ @dots{} AC_INIT, etc. @dots{}} @group # Checks for programs. AC_PROG_CC AC_PROG_AWK @r{ @dots{} more program checks @dots{}} AC_CACHE_SAVE @end group @group # Checks for libraries. AC_CHECK_LIB([nsl], [gethostbyname]) AC_CHECK_LIB([socket], [connect]) @r{ @dots{} more lib checks @dots{}} AC_CACHE_SAVE @end group @group # Might abort@dots{} AM_PATH_GTK([1.0.2], [], [AC_MSG_ERROR([GTK not in path])]) AM_PATH_GTKMM([0.9.5], [], [AC_MSG_ERROR([GTK not in path])]) @end group @r{ @dots{} AC_OUTPUT, etc. 
@dots{}} @end example @node Printing Messages @section Printing Messages @cindex Messages, from @command{configure} @command{configure} scripts need to give users running them several kinds of information. The following macros print messages in ways appropriate for each kind. The arguments to all of them get enclosed in shell double quotes, so the shell performs variable and back-quote substitution on them. These macros are all wrappers around the @command{echo} shell command. They direct output to the appropriate file descriptor (@pxref{File Descriptor Macros}). @command{configure} scripts should rarely need to run @command{echo} directly to print messages for the user. Using these macros makes it easy to change how and when each kind of message is printed; such changes need only be made to the macro definitions and all the callers change automatically. To diagnose static issues, i.e., when @command{autoconf} is run, see @ref{Diagnostic Macros}. @defmac AC_MSG_CHECKING (@var{feature-description}) @acindex{MSG_CHECKING} Notify the user that @command{configure} is checking for a particular feature. This macro prints a message that starts with @samp{checking } and ends with @samp{...} and no newline. It must be followed by a call to @code{AC_MSG_RESULT} to print the result of the check and the newline. The @var{feature-description} should be something like @samp{whether the Fortran compiler accepts C++ comments} or @samp{for _Alignof}. This macro prints nothing if @command{configure} is run with the @option{--quiet} or @option{--silent} option. @end defmac @anchor{AC_MSG_RESULT} @defmac AC_MSG_RESULT (@var{result-description}) @acindex{MSG_RESULT} Notify the user of the results of a check. @var{result-description} is almost always the value of the cache variable for the check, typically @samp{yes}, @samp{no}, or a file name. 
This macro should follow a call to @code{AC_MSG_CHECKING}, and the @var{result-description} should be the completion of the message printed by the call to @code{AC_MSG_CHECKING}. This macro prints nothing if @command{configure} is run with the @option{--quiet} or @option{--silent} option. @end defmac @anchor{AC_MSG_NOTICE} @defmac AC_MSG_NOTICE (@var{message}) @acindex{MSG_NOTICE} Deliver the @var{message} to the user. It is useful mainly to print a general description of the overall purpose of a group of feature checks, e.g., @example AC_MSG_NOTICE([checking if stack overflow is detectable]) @end example This macro prints nothing if @command{configure} is run with the @option{--quiet} or @option{--silent} option. @end defmac @anchor{AC_MSG_ERROR} @defmac AC_MSG_ERROR (@var{error-description}, @dvar{exit-status, $?/1}) @acindex{MSG_ERROR} Notify the user of an error that prevents @command{configure} from completing. This macro prints an error message to the standard error output and exits @command{configure} with @var{exit-status} (@samp{$?} by default, except that @samp{0} is converted to @samp{1}). @var{error-description} should be something like @samp{invalid value $HOME for \$HOME}. The @var{error-description} should start with a lower-case letter, and ``cannot'' is preferred to ``can't''. @end defmac @defmac AC_MSG_FAILURE (@var{error-description}, @ovar{exit-status}) @acindex{MSG_FAILURE} This @code{AC_MSG_ERROR} wrapper notifies the user of an error that prevents @command{configure} from completing @emph{and} that additional details are provided in @file{config.log}. This is typically used when abnormal results are found during a compilation. @end defmac @anchor{AC_MSG_WARN} @defmac AC_MSG_WARN (@var{problem-description}) @acindex{MSG_WARN} Notify the @command{configure} user of a possible problem. 
This macro prints the message to the standard error output; @command{configure} continues running afterward, so macros that call @code{AC_MSG_WARN} should provide a default (back-up) behavior for the situations they warn about. @var{problem-description} should be something like @samp{ln -s seems to make hard links}. @end defmac @c ====================================================== Programming in M4. @node Programming in M4 @chapter Programming in M4 @cindex M4 Autoconf is written on top of two layers: @dfn{M4sugar}, which provides convenient macros for pure M4 programming, and @dfn{M4sh}, which provides macros dedicated to shell script generation. As of this version of Autoconf, these two layers still contain experimental macros, whose interface might change in the future. As a matter of fact, @emph{anything that is not documented must not be used}. @menu * M4 Quotation:: Protecting macros from unwanted expansion * Using autom4te:: The Autoconf executables backbone * Programming in M4sugar:: Convenient pure M4 macros * Debugging via autom4te:: Figuring out what M4 was doing @end menu @node M4 Quotation @section M4 Quotation @cindex M4 quotation @cindex quotation The most common problem with existing macros is an improper quotation. This section, which users of Autoconf can skip, but which macro writers @emph{must} read, first justifies the quotation scheme that was chosen for Autoconf and then ends with a rule of thumb. Understanding the former helps one to follow the latter. @menu * Active Characters:: Characters that change the behavior of M4 * One Macro Call:: Quotation and one macro call * Quoting and Parameters:: M4 vs. 
shell parameters * Quotation and Nested Macros:: Macros calling macros * Changequote is Evil:: Worse than INTERCAL: M4 + changequote * Quadrigraphs:: Another way to escape special characters * Balancing Parentheses:: Dealing with unbalanced parentheses * Quotation Rule Of Thumb:: One parenthesis, one quote @end menu @node Active Characters @subsection Active Characters To fully understand where proper quotation is important, you first need to know what the special characters are in Autoconf: @samp{#} introduces a comment inside which no macro expansion is performed, @samp{,} separates arguments, @samp{[} and @samp{]} are the quotes themselves@footnote{By itself, M4 uses @samp{`} and @samp{'}; it is the M4sugar layer that sets up the preferred quotes of @samp{[} and @samp{]}.}, @samp{(} and @samp{)} (which M4 tries to match by pairs), and finally @samp{$} inside a macro definition. In order to understand the delicate case of macro calls, we first have to present some obvious failures. Below they are ``obvious-ified'', but when you find them in real life, they are usually in disguise. Comments, introduced by a hash and running up to the newline, are opaque tokens to the top level: active characters are turned off, and there is no macro expansion: @example # define([def], ine) @result{}# define([def], ine) @end example Each time there can be a macro expansion, there is a quotation expansion, i.e., one level of quotes is stripped: @example int tab[10]; @result{}int tab10; [int tab[10];] @result{}int tab[10]; @end example Without this in mind, the reader might try hopelessly to use her macro @code{array}: @example define([array], [int tab[10];]) array @result{}int tab10; [array] @result{}array @end example @noindent How can you correctly output the intended results@footnote{Using @code{defn}.}? 
@node One Macro Call @subsection One Macro Call Let's proceed on the interaction between active characters and macros with this small macro, which just returns its first argument: @example define([car], [$1]) @end example @noindent The two pairs of quotes above are not part of the arguments of @code{define}; rather, they are understood by the top level when it tries to find the arguments of @code{define}. Therefore, assuming @code{car} is not already defined, it is equivalent to write: @example define(car, $1) @end example @noindent But, while it is acceptable for a @file{configure.ac} to avoid unnecessary quotes, it is bad practice for Autoconf macros which must both be more robust and also advocate perfect style. At the top level, there are only two possibilities: either you quote or you don't: @example car(foo, bar, baz) @result{}foo [car(foo, bar, baz)] @result{}car(foo, bar, baz) @end example Let's pay attention to the special characters: @example car(#) @error{}EOF in argument list @end example The closing parenthesis is hidden in the comment; with a hypothetical quoting, the top level understood it this way: @example car([#)] @end example @noindent Proper quotation, of course, fixes the problem: @example car([#]) @result{}# @end example Here are more examples: @example car(foo, bar) @result{}foo car([foo, bar]) @result{}foo, bar car((foo, bar)) @result{}(foo, bar) car([(foo], [bar)]) @result{}(foo define([a], [b]) @result{} car(a) @result{}b car([a]) @result{}b car([[a]]) @result{}a car([[[a]]]) @result{}[a] @end example @node Quoting and Parameters @subsection Quoting and Parameters When M4 encounters @samp{$} within a macro definition, followed immediately by a character it recognizes (@samp{0}@dots{}@samp{9}, @samp{#}, @samp{@@}, or @samp{*}), it will perform M4 parameter expansion. This happens regardless of how many layers of quotes the parameter expansion is nested within, or even if it occurs in text that will be rescanned as a comment. 
@example define([none], [$1]) @result{} define([one], [[$1]]) @result{} define([two], [[[$1]]]) @result{} define([comment], [# $1]) @result{} define([active], [ACTIVE]) @result{} none([active]) @result{}ACTIVE one([active]) @result{}active two([active]) @result{}[active] comment([active]) @result{}# active @end example On the other hand, since autoconf generates shell code, you often want to output shell variable expansion, rather than performing M4 parameter expansion. To do this, you must use M4 quoting to separate the @samp{$} from the next character in the definition of your macro. If the macro definition occurs in single-quoted text, then insert another level of quoting; if the usage is already inside a double-quoted string, then split it into concatenated strings. @example define([foo], [a single-quoted $[]1 definition]) @result{} define([bar], [[a double-quoted $][1 definition]]) @result{} foo @result{}a single-quoted $1 definition bar @result{}a double-quoted $1 definition @end example Posix states that M4 implementations are free to provide implementation extensions when @samp{$@{} is encountered in a macro definition. Autoconf reserves the longer sequence @samp{$@{@{} for use with planned extensions that will be available in the future GNU M4 2.0, but guarantees that all other instances of @samp{$@{} will be output literally. Therefore, this idiom can also be used to output shell code parameter references: @example define([first], [$@{1@}])first @result{}$@{1@} @end example Posix also states that @samp{$11} should expand to the first parameter concatenated with a literal @samp{1}, although some versions of GNU M4 expand the eleventh parameter instead. For portability, you should only use single-digit M4 parameter expansion. 
With this in mind, we can explore the cases where macros invoke macros@enddots{} @node Quotation and Nested Macros @subsection Quotation and Nested Macros The examples below use the following macros: @example define([car], [$1]) define([active], [ACT, IVE]) define([array], [int tab[10]]) @end example Each additional embedded macro call introduces other possible interesting quotations: @example car(active) @result{}ACT car([active]) @result{}ACT, IVE car([[active]]) @result{}active @end example In the first case, the top level looks for the arguments of @code{car}, and finds @samp{active}. Because M4 evaluates its arguments before applying the macro, @samp{active} is expanded, which results in: @example car(ACT, IVE) @result{}ACT @end example @noindent In the second case, the top level gives @samp{active} as first and only argument of @code{car}, which results in: @example active @result{}ACT, IVE @end example @noindent i.e., the argument is evaluated @emph{after} the macro that invokes it. In the third case, @code{car} receives @samp{[active]}, which results in: @example [active] @result{}active @end example @noindent exactly as we already saw above. The example above, applied to a more realistic example, gives: @example car(int tab[10];) @result{}int tab10; car([int tab[10];]) @result{}int tab10; car([[int tab[10];]]) @result{}int tab[10]; @end example @noindent Huh? The first case is easily understood, but why is the second wrong, and the third right? To understand that, you must know that after M4 expands a macro, the resulting text is immediately subjected to macro expansion and quote removal. This means that the quote removal occurs twice---first before the argument is passed to the @code{car} macro, and second after the @code{car} macro expands to the first argument. As the author of the Autoconf macro @code{car}, you then consider it to be incorrect that your users have to double-quote the arguments of @code{car}, so you ``fix'' your macro. 
Let's call it @code{qar} for quoted car: @example define([qar], [[$1]]) @end example @noindent and check that @code{qar} is properly fixed: @example qar([int tab[10];]) @result{}int tab[10]; @end example @noindent Ahhh! That's much better. But note what you've done: now that the result of @code{qar} is always a literal string, the only time a user can use nested macros is if she relies on an @emph{unquoted} macro call: @example qar(active) @result{}ACT qar([active]) @result{}active @end example @noindent leaving no way for her to reproduce what she used to do with @code{car}: @example car([active]) @result{}ACT, IVE @end example @noindent Worse yet: she wants to use a macro that produces a set of @code{cpp} macros: @example define([my_includes], [#include <stdio.h>]) car([my_includes]) @result{}#include <stdio.h> qar(my_includes) @error{}EOF in argument list @end example This macro, @code{qar}, because it double quotes its arguments, forces its users to leave their macro calls unquoted, which is dangerous. Commas and other active symbols are interpreted by M4 before they are given to the macro, often not in the way the users expect. Also, because @code{qar} behaves differently from the other macros, it's an exception that should be avoided in Autoconf. @node Changequote is Evil @subsection @code{changequote} is Evil @cindex @code{changequote} The temptation is often high to bypass proper quotation, in particular when it's late at night. Then, many experienced Autoconf hackers finally surrender to the dark side of the force and use the ultimate weapon: @code{changequote}. The M4 builtin @code{changequote} belongs to a set of primitives that allow one to adjust the syntax of the language to one's needs. 
For instance, by default M4 uses @samp{`} and @samp{'} as quotes, but in the context of shell programming (and actually of most programming languages), that's about the worst choice one can make: because of strings and back-quoted expressions in shell code (such as @samp{'this'} and @samp{`that`}), and because of literal characters in usual programming languages (as in @samp{'0'}), there are many unbalanced @samp{`} and @samp{'}. Proper M4 quotation then becomes a nightmare, if not impossible. In order to make M4 useful in such a context, its designers have equipped it with @code{changequote}, which makes it possible to choose another pair of quotes. M4sugar, M4sh, Autoconf, and Autotest all have chosen to use @samp{[} and @samp{]}. Not especially because they are unlikely characters, but @emph{because they are characters unlikely to be unbalanced}. There are other magic primitives, such as @code{changecom} to specify what syntactic forms are comments (it is common to see @samp{changecom(<!--, -->)} when M4 is used to produce HTML pages), @code{changeword} and @code{changesyntax} to change other syntactic details (such as the character to denote the @var{n}th argument, @samp{$} by default, the parentheses around arguments, etc.). These primitives are really meant to make M4 more useful for specific domains: they should be considered like command line options: @option{--quotes}, @option{--comments}, @option{--words}, and @option{--syntax}. Nevertheless, they are implemented as M4 builtins, as it makes M4 libraries self contained (no need for additional options). There lies the problem@enddots{} @sp 1 The problem is that it is then tempting to use them in the middle of an M4 script, as opposed to its initialization. This, if not carefully thought out, can lead to disastrous effects: @emph{you are changing the language in the middle of the execution}. 
Changing and restoring the syntax is often not enough: if you happened to invoke macros in between, these macros are lost, as the current syntax is probably not the one they were implemented with. @c FIXME: I've been looking for a short, real case example, but I @c lost them all :( @node Quadrigraphs @subsection Quadrigraphs @cindex quadrigraphs @cindex @samp{@@S|@@} @cindex @samp{@@&t@@} @c Info cannot handle ':' in index entries. @ifnotinfo @cindex @samp{@@<:@@} @cindex @samp{@@:>@@} @cindex @samp{@@%:@@} @cindex @samp{@@@{:@@} @cindex @samp{@@:@}@@} @end ifnotinfo When writing an Autoconf macro you may occasionally need to generate special characters that are difficult to express with the standard Autoconf quoting rules. For example, you may need to output the regular expression @samp{[^[]}, which matches any character other than @samp{[}. This expression contains unbalanced brackets so it cannot be put easily into an M4 macro. Additionally, there are a few m4sugar macros (such as @code{m4_split} and @code{m4_expand}) which internally use special markers in addition to the regular quoting characters. If the arguments to these macros contain the literal strings @samp{-=<@{(} or @samp{)@}>=-}, the macros might behave incorrectly. You can work around these problems by using one of the following @dfn{quadrigraphs}: @table @samp @item @@<:@@ @samp{[} @item @@:>@@ @samp{]} @item @@S|@@ @samp{$} @item @@%:@@ @samp{#} @item @@@{:@@ @samp{(} @item @@:@}@@ @samp{)} @item @@&t@@ Expands to nothing. @end table Quadrigraphs are replaced at a late stage of the translation process, after @command{m4} is run, so they do not get in the way of M4 quoting. For example, the string @samp{^@@<:@@}, independently of its quotation, appears as @samp{^[} in the output. The empty quadrigraph can be used: @itemize @minus @item to mark trailing spaces explicitly Trailing spaces are smashed by @command{autom4te}. This is a feature. 
@item to produce quadrigraphs and other strings reserved by m4sugar For instance @samp{@@<@@&t@@:@@} produces @samp{@@<:@@}. For a more contrived example: @example m4_define([a], [A])m4_define([b], [B])m4_define([c], [C])dnl m4_split([a )@}>=- b -=<@{( c]) @result{}[a], [], [B], [], [c] m4_split([a )@}@@&t@@>=- b -=<@@&t@@@{( c]) @result{}[a], [)@}>=-], [b], [-=<@{(], [c] @end example @item to escape @emph{occurrences} of forbidden patterns For instance you might want to mention @code{AC_FOO} in a comment, while still being sure that @command{autom4te} still catches unexpanded @samp{AC_*}. Then write @samp{AC@@&t@@_FOO}. @end itemize The name @samp{@@&t@@} was suggested by Paul Eggert: @quotation I should give some credit to the @samp{@@&t@@} pun. The @samp{&} is my own invention, but the @samp{t} came from the source code of the ALGOL68C compiler, written by Steve Bourne (of Bourne shell fame), and which used @samp{mt} to denote the empty string. In C, it would have looked like something like: @example char const mt[] = ""; @end example @noindent but of course the source code was written in Algol 68. I don't know where he got @samp{mt} from: it could have been his own invention, and I suppose it could have been a common pun around the Cambridge University computer lab at the time. @end quotation @node Balancing Parentheses @subsection Dealing with unbalanced parentheses @cindex balancing parentheses @cindex parentheses, balancing @cindex unbalanced parentheses, managing One of the pitfalls of portable shell programming is that if you intend your script to run with obsolescent shells, @command{case} statements require unbalanced parentheses. @xref{case, , Limitations of Shell Builtins}. With syntax highlighting editors, the presence of unbalanced @samp{)} can interfere with editors that perform syntax highlighting of macro contents based on finding the matching @samp{(}. 
Another concern is how much editing must be done when transferring code snippets between shell scripts and macro definitions. But most importantly, the presence of unbalanced parentheses can introduce expansion bugs. For an example, here is an underquoted attempt to use the macro @code{my_case}, which happens to expand to a portable @command{case} statement: @example AC_DEFUN([my_case], [case $file_name in *.c) echo "C source code";; esac]) AS_IF(:, my_case) @end example @noindent In the above example, the @code{AS_IF} call under-quotes its arguments. As a result, the unbalanced @samp{)} generated by the premature expansion of @code{my_case} results in expanding @code{AS_IF} with a truncated parameter, and the expansion is syntactically invalid: @example if :; then case $file_name in *.c fi echo "C source code";; esac) @end example If nothing else, this should emphasize the importance of quoting the arguments to macro calls. On the other hand, there are several variations for defining @code{my_case} to be more robust, even when used without proper quoting, each with some benefits and some drawbacks. @itemize @w{} @item Use left parenthesis before pattern @example AC_DEFUN([my_case], [case $file_name in (*.c) echo "C source code";; esac]) @end example @noindent This is simple and provides balanced parentheses. Although this is not portable to obsolescent shells (notably Solaris 10 @command{/bin/sh}), platforms with these shells invariably have a more-modern shell available somewhere so this approach typically suffices nowadays. @item Creative literal shell comment @example AC_DEFUN([my_case], [case $file_name in #( *.c) echo "C source code";; esac]) @end example @noindent This version provides balanced parentheses to several editors, and can be copied and pasted into a terminal as is. Unfortunately, it is still unbalanced as an Autoconf argument, since @samp{#(} is an M4 comment that masks the normal properties of @samp{(}. 
@item Quadrigraph shell comment @example AC_DEFUN([my_case], [case $file_name in @@%:@@( *.c) echo "C source code";; esac]) @end example @noindent This version provides balanced parentheses to even more editors, and can be used as a balanced Autoconf argument. Unfortunately, it requires some editing before it can be copied and pasted into a terminal, and the use of the quadrigraph @samp{@@%:@@} for @samp{#} reduces readability. @item Quoting just the parenthesis @example AC_DEFUN([my_case], [case $file_name in *.c[)] echo "C source code";; esac]) @end example @noindent This version quotes the @samp{)}, so that it can be used as a balanced Autoconf argument. As written, this is not balanced to an editor, but it can be coupled with @samp{[#(]} to meet that need, too. However, it still requires some edits before it can be copied and pasted into a terminal. @item Double-quoting the entire statement @example AC_DEFUN([my_case], [[case $file_name in #( *.c) echo "C source code";; esac]]) @end example @noindent Since the entire macro is double-quoted, there is no problem with using this as an Autoconf argument; and since the double-quoting is over the entire statement, this code can be easily copied and pasted into a terminal. However, the double quoting prevents the expansion of any macros inside the case statement, which may cause its own set of problems. @item Using @code{AS_CASE} @example AC_DEFUN([my_case], [AS_CASE([$file_name], [*.c], [echo "C source code"])]) @end example @noindent This version avoids the balancing issue altogether, by relying on @code{AS_CASE} (@pxref{Common Shell Constructs}); it also allows for the expansion of @code{AC_REQUIRE} to occur prior to the entire case statement, rather than within a branch of the case statement that might not be taken. However, the abstraction comes with a penalty that it is no longer a quick copy, paste, and edit to get back to shell code. 
@end itemize @node Quotation Rule Of Thumb @subsection Quotation Rule Of Thumb To conclude, the quotation rule of thumb is: @center @emph{One pair of quotes per pair of parentheses.} Never over-quote, never under-quote, in particular in the definition of macros. In the few places where the macros need to use brackets (usually in C program text or regular expressions), properly quote @emph{the arguments}! It is common to read Autoconf programs with snippets like: @example AC_TRY_LINK( changequote(<<, >>)dnl <<#include <time.h> #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif>>, changequote([, ])dnl [atoi (*tzname);], ac_cv_var_tzname=yes, ac_cv_var_tzname=no) @end example @noindent which is incredibly useless since @code{AC_TRY_LINK} is @emph{already} double quoting, so you just need: @example AC_TRY_LINK( [#include <time.h> #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif], [atoi (*tzname);], [ac_cv_var_tzname=yes], [ac_cv_var_tzname=no]) @end example @noindent The M4-fluent reader might note that these two examples are rigorously equivalent, since M4 swallows both the @samp{changequote(<<, >>)} and @samp{<<} @samp{>>} when it @dfn{collects} the arguments: these quotes are not part of the arguments! Simplified, the example above is just doing this: @example changequote(<<, >>)dnl <<[]>> changequote([, ])dnl @end example @noindent instead of simply: @example [[]] @end example With macros that do not double quote their arguments (which is the rule), double-quote the (risky) literals: @example AC_LINK_IFELSE([AC_LANG_PROGRAM( [[#include <time.h> #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif]], [atoi (*tzname);])], [ac_cv_var_tzname=yes], [ac_cv_var_tzname=no]) @end example Please note that the macro @code{AC_TRY_LINK} is obsolete, so you really should be using @code{AC_LINK_IFELSE} instead. 
@xref{Quadrigraphs}, for what to do if you run into a hopeless case where quoting does not suffice. When you create a @command{configure} script using newly written macros, examine it carefully to check whether you need to add more quotes in your macros. If one or more words have disappeared in the M4 output, you need more quotes. When in doubt, quote. However, it's also possible to put on too many layers of quotes. If this happens, the resulting @command{configure} script may contain unexpanded macros. The @command{autoconf} program checks for this problem by looking for the string @samp{AC_} in @file{configure}. However, this heuristic does not work in general: for example, it does not catch overquoting in @code{AC_DEFINE} descriptions. @c ---------------------------------------- Using autom4te @node Using autom4te @section Using @command{autom4te} The Autoconf suite, including M4sugar, M4sh, and Autotest, in addition to Autoconf per se, heavily rely on M4. All these different uses revealed common needs factored into a layer over M4: @command{autom4te}@footnote{ @c Yet another great name from Lars J. Aas. @c }. @command{autom4te} is a preprocessor that is like @command{m4}. It supports M4 extensions designed for use in tools like Autoconf. @menu * autom4te Invocation:: A GNU M4 wrapper * Customizing autom4te:: Customizing the Autoconf package @end menu @node autom4te Invocation @subsection Invoking @command{autom4te} The command line arguments are modeled after M4's: @example autom4te @var{options} @var{files} @end example @noindent @evindex M4 where the @var{files} are directly passed to @command{m4}. By default, GNU M4 is found during configuration, but the environment variable @env{M4} can be set to tell @command{autom4te} where to look. In addition to the regular expansion, it handles the replacement of the quadrigraphs (@pxref{Quadrigraphs}), and of @samp{__oline__}, the current line in the output. 
It supports an extended syntax for the @var{files}: @table @file @item @var{file}.m4f This file is an M4 frozen file. Note that @emph{all the previous files are ignored}. See the @option{--melt} option for the rationale. @item @var{file}? If found in the library path, the @var{file} is included for expansion, otherwise it is ignored instead of triggering a failure. @end table @sp 1 Of course, it supports the Autoconf common subset of options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Report processing steps. @item --debug @itemx -d Don't remove the temporary files and be even more verbose. @item --include=@var{dir} @itemx -I @var{dir} Also look for input files in @var{dir}. Multiple invocations accumulate. @item --output=@var{file} @itemx -o @var{file} Save output (script or trace) to @var{file}. The file @option{-} stands for the standard output. @end table @sp 1 As an extension of @command{m4}, it includes the following options: @table @option @item --warnings=@var{category}[,@var{category}...] @itemx -W@var{category}[,@var{category}...] @evindex WARNINGS Enable or disable warnings related to each @var{category}. @xref{m4_warn}, for a comprehensive list of categories. Special values include: @table @samp @item all Enable all categories of warnings. @item none Disable all categories of warnings. @item error Treat all warnings as errors. @item no-@var{category} Disable warnings falling into @var{category}. @end table The environment variable @env{WARNINGS} may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of @option{--warnings}, but unknown categories are silently ignored. 
The command line takes precedence; for instance, if @env{WARNINGS} is set to @code{obsolete}, but @option{-Wnone} is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see @ref{m4_warn}. @item --melt @itemx -M Do not use frozen files. Any argument @code{@var{file}.m4f} is replaced by @code{@var{file}.m4}. This helps tracing the macros which are executed only when the files are frozen, typically @code{m4_define}. For instance, running: @example autom4te --melt 1.m4 2.m4f 3.m4 4.m4f input.m4 @end example @noindent is roughly equivalent to running: @example m4 1.m4 2.m4 3.m4 4.m4 input.m4 @end example @noindent while @example autom4te 1.m4 2.m4f 3.m4 4.m4f input.m4 @end example @noindent is equivalent to: @example m4 --reload-state=4.m4f input.m4 @end example @item --freeze @itemx -F Produce a frozen state file. @command{autom4te} freezing is stricter than M4's: it must produce no warnings, and no output other than empty lines (a line with white space is @emph{not} empty) and comments (starting with @samp{#}). Unlike @command{m4}'s similarly-named option, this option takes no argument: @example autom4te 1.m4 2.m4 3.m4 --freeze --output=3.m4f @end example @noindent corresponds to @example m4 1.m4 2.m4 3.m4 --freeze-state=3.m4f @end example @item --mode=@var{octal-mode} @itemx -m @var{octal-mode} Set the mode of the non-traces output to @var{octal-mode}; by default @samp{0666}. @end table @sp 1 @cindex @file{autom4te.cache} As another additional feature over @command{m4}, @command{autom4te} caches its results. GNU M4 is able to produce a regular output and traces at the same time. Traces are heavily used in the GNU Build System: @command{autoheader} uses them to build @file{config.h.in}, @command{autoreconf} to determine what GNU Build System components are used, @command{automake} to ``parse'' @file{configure.ac} etc. 
To avoid recomputation, traces are cached while performing regular expansion, and conversely. This cache is (actually, the caches are) stored in the directory @file{autom4te.cache}. @emph{It can safely be removed} at any moment (especially if for some reason @command{autom4te} considers it trashed). @table @option @item --cache=@var{directory} @itemx -C @var{directory} Specify the name of the directory where the result should be cached. Passing an empty value disables caching. Be sure to pass a relative file name, as for the time being, global caches are not supported. @item --no-cache Don't cache the results. @item --force @itemx -f If a cache is used, consider it obsolete (but update it anyway). @end table @sp 1 Because traces are so important to the GNU Build System, @command{autom4te} provides high level tracing features as compared to M4, and helps exploiting the cache: @table @option @item --trace=@var{macro}[:@var{format}] @itemx -t @var{macro}[:@var{format}] Trace the invocations of @var{macro} according to the @var{format}. Multiple @option{--trace} arguments can be used to list several macros. Multiple @option{--trace} arguments for a single macro are not cumulative; instead, you should just make @var{format} as long as needed. The @var{format} is a regular string, with newlines if desired, and several special escape codes. It defaults to @samp{$f:$l:$n:$%}. It can use the following special escapes: @table @samp @item $$ @c $$ restore font-lock The character @samp{$}. @item $f The file name from which @var{macro} is called. @item $l The line number from which @var{macro} is called. @item $d The depth of the @var{macro} call. This is an M4 technical detail that you probably don't want to know about. @item $n The name of the @var{macro}. @item $@var{num} The @var{num}th argument of the call to @var{macro}. 
@item $@@ @itemx $@var{sep}@@ @itemx $@{@var{separator}@}@@ All the arguments passed to @var{macro}, separated by the character @var{sep} or the string @var{separator} (@samp{,} by default). Each argument is quoted, i.e., enclosed in a pair of square brackets. @item $* @itemx $@var{sep}* @itemx $@{@var{separator}@}* As above, but the arguments are not quoted. @item $% @itemx $@var{sep}% @itemx $@{@var{separator}@}% As above, but the arguments are not quoted, all new line characters in the arguments are smashed, and the default separator is @samp{:}. The escape @samp{$%} produces single-line trace outputs (unless you put newlines in the @samp{separator}), while @samp{$@@} and @samp{$*} do not. @end table @xref{autoconf Invocation}, for examples of trace uses. @item --preselect=@var{macro} @itemx -p @var{macro} Cache the traces of @var{macro}, but do not enable traces. This is especially important to save CPU cycles in the future. For instance, when invoked, @command{autoconf} pre-selects all the macros that @command{autoheader}, @command{automake}, @command{autoreconf}, etc., trace, so that running @command{m4} is not needed to trace them: the cache suffices. This results in a huge speed-up. @end table @sp 1 @cindex Autom4te Library Finally, @command{autom4te} introduces the concept of @dfn{Autom4te libraries}. They consist in a powerful yet extremely simple feature: sets of combined command line arguments: @table @option @item --language=@var{language} @itemx -l @var{language} Use the @var{language} Autom4te library. Current languages include: @table @code @item M4sugar create M4sugar output. @item M4sh create M4sh executable shell scripts. @item Autotest create Autotest executable test suites. @item Autoconf-without-aclocal-m4 create Autoconf executable configure scripts without reading @file{aclocal.m4}. @item Autoconf create Autoconf executable configure scripts. 
This language inherits all the characteristics of @code{Autoconf-without-aclocal-m4} and additionally reads @file{aclocal.m4}. @end table @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend directory @var{dir} to the search path. This is used to include the language-specific files before any third-party macros. @end table @cindex @file{autom4te.cfg} As an example, if Autoconf is installed in its default location, @file{/usr/local}, the command @samp{autom4te -l m4sugar foo.m4} is strictly equivalent to the command: @example autom4te --prepend-include /usr/local/share/autoconf \ m4sugar/m4sugar.m4f foo.m4 @end example @noindent Recursive expansion applies here: the command @samp{autom4te -l m4sh foo.m4} is the same as @samp{autom4te --language M4sugar m4sugar/m4sh.m4f foo.m4}, i.e.: @example autom4te --prepend-include /usr/local/share/autoconf \ m4sugar/m4sugar.m4f m4sugar/m4sh.m4f --mode 777 foo.m4 @end example @noindent The definition of the languages is stored in @file{autom4te.cfg}. @node Customizing autom4te @subsection Customizing @command{autom4te} One can customize @command{autom4te} via @file{~/.autom4te.cfg} (i.e., as found in the user home directory), and @file{./.autom4te.cfg} (i.e., as found in the directory from which @command{autom4te} is run). The order is first reading @file{autom4te.cfg}, then @file{~/.autom4te.cfg}, then @file{./.autom4te.cfg}, and finally the command line arguments. In these text files, comments are introduced with @code{#}, and empty lines are ignored. Customization is performed on a per-language basis, wrapped in between a @samp{begin-language: "@var{language}"}, @samp{end-language: "@var{language}"} pair. Customizing a language stands for appending options (@pxref{autom4te Invocation}) to the current definition of the language. Options, and more generally arguments, are introduced by @samp{args: @var{arguments}}. You may use the traditional shell syntax to quote the @var{arguments}. 
As an example, to disable Autoconf caches (@file{autom4te.cache}) globally, include the following lines in @file{~/.autom4te.cfg}: @verbatim ## ------------------ ## ## User Preferences. ## ## ------------------ ## begin-language: "Autoconf-without-aclocal-m4" args: --no-cache end-language: "Autoconf-without-aclocal-m4" @end verbatim @node Programming in M4sugar @section Programming in M4sugar @cindex M4sugar M4 by itself provides only a small, but sufficient, set of all-purpose macros. M4sugar introduces additional generic macros. Its name was coined by Lars J. Aas: ``Readability And Greater Understanding Stands 4 M4sugar''. M4sugar reserves the macro namespace @samp{^_m4_} for internal use, and the macro namespace @samp{^m4_} for M4sugar macros. You should not define your own macros into these namespaces. @menu * Redefined M4 Macros:: M4 builtins changed in M4sugar * Diagnostic Macros:: Diagnostic messages from M4sugar * Diversion support:: Diversions in M4sugar * Conditional constructs:: Conditions in M4 * Looping constructs:: Iteration in M4 * Evaluation Macros:: More quotation and evaluation control * Text processing Macros:: String manipulation in M4 * Number processing Macros:: Arithmetic computation in M4 * Set manipulation Macros:: Set manipulation in M4 * Forbidden Patterns:: Catching unexpanded macros @end menu @node Redefined M4 Macros @subsection Redefined M4 Macros @msindex{builtin} @msindex{changecom} @msindex{changequote} @msindex{debugfile} @msindex{debugmode} @msindex{decr} @msindex{define} @msindex{divnum} @msindex{errprint} @msindex{esyscmd} @msindex{eval} @msindex{format} @msindex{ifdef} @msindex{incr} @msindex{index} @msindex{indir} @msindex{len} @msindex{pushdef} @msindex{shift} @msindex{substr} @msindex{syscmd} @msindex{sysval} @msindex{traceoff} @msindex{traceon} @msindex{translit} With a few exceptions, all the M4 native macros are moved in the @samp{m4_} pseudo-namespace, e.g., M4sugar renames @code{define} as @code{m4_define} etc. 
The list of macros unchanged from M4, except for their name, is: @itemize @minus @item m4_builtin @item m4_changecom @item m4_changequote @item m4_debugfile @item m4_debugmode @item m4_decr @item m4_define @item m4_divnum @item m4_errprint @item m4_esyscmd @item m4_eval @item m4_format @item m4_ifdef @item m4_incr @item m4_index @item m4_indir @item m4_len @item m4_pushdef @item m4_shift @item m4_substr @item m4_syscmd @item m4_sysval @item m4_traceoff @item m4_traceon @item m4_translit @end itemize Some M4 macros are redefined, and are slightly incompatible with their native equivalent. @defmac __file__ @defmacx __line__ @MSindex __file__ @MSindex __line__ All M4 macros starting with @samp{__} retain their original name: for example, no @code{m4__file__} is defined. @end defmac @defmac __oline__ @MSindex __oline__ This is not technically a macro, but a feature of Autom4te. The sequence @code{__oline__} can be used similarly to the other m4sugar location macros, but rather than expanding to the location of the input file, it is translated to the line number where it appears in the output file after all other M4 expansions. @end defmac @defmac dnl @MSindex dnl This macro kept its original name: no @code{m4_dnl} is defined. @end defmac @defmac m4_bpatsubst (@var{string}, @var{regexp}, @ovar{replacement}) @msindex{bpatsubst} This macro corresponds to @code{patsubst}. The name @code{m4_patsubst} is kept for future versions of M4sugar, once GNU M4 2.0 is released and supports extended regular expression syntax. @end defmac @defmac m4_bregexp (@var{string}, @var{regexp}, @ovar{replacement}) @msindex{bregexp} This macro corresponds to @code{regexp}. The name @code{m4_regexp} is kept for future versions of M4sugar, once GNU M4 2.0 is released and supports extended regular expression syntax. 
@end defmac @defmac m4_copy (@var{source}, @var{dest}) @defmacx m4_copy_force (@var{source}, @var{dest}) @defmacx m4_rename (@var{source}, @var{dest}) @defmacx m4_rename_force (@var{source}, @var{dest}) @msindex{copy} @msindex{copy_force} @msindex{rename} @msindex{rename_force} These macros aren't directly builtins, but are closely related to @code{m4_pushdef} and @code{m4_defn}. @code{m4_copy} and @code{m4_rename} ensure that @var{dest} is undefined, while @code{m4_copy_force} and @code{m4_rename_force} overwrite any existing definition. All four macros then proceed to copy the entire pushdef stack of definitions of @var{source} over to @var{dest}. @code{m4_copy} and @code{m4_copy_force} preserve the source (including in the special case where @var{source} is undefined), while @code{m4_rename} and @code{m4_rename_force} undefine the original macro name (making it an error to rename an undefined @var{source}). Note that attempting to invoke a renamed macro might not work, since the macro may have a dependence on helper macros accessed via composition of @samp{$0} but that were not also renamed; likewise, other macros may have a hard-coded dependence on @var{source} and could break if @var{source} has been deleted. On the other hand, it is always safe to rename a macro to temporarily move it out of the way, then rename it back later to restore original semantics. @end defmac @defmac m4_defn (@var{macro}@dots{}) @msindex{defn} This macro fails if @var{macro} is not defined, even when using older versions of M4 that did not warn. See @code{m4_undefine}. Unfortunately, in order to support these older versions of M4, there are some situations involving unbalanced quotes where concatenating multiple macros together will work in newer M4 but not in m4sugar; use quadrigraphs to work around this. 
@end defmac @defmac m4_divert (@var{diversion}) @msindex{divert} M4sugar relies heavily on diversions, so rather than behaving as a primitive, @code{m4_divert} behaves like: @example m4_divert_pop()m4_divert_push([@var{diversion}]) @end example @noindent @xref{Diversion support}, for more details about the use of the diversion stack. In particular, this implies that @var{diversion} should be a named diversion rather than a raw number. But be aware that it is seldom necessary to explicitly change the diversion stack, and that when done incorrectly, it can lead to syntactically invalid scripts. @end defmac @defmac m4_dumpdef (@var{name}@dots{}) @defmacx m4_dumpdefs (@var{name}@dots{}) @msindex{dumpdef} @msindex{dumpdefs} @code{m4_dumpdef} is like the M4 builtin, except that this version requires at least one argument, output always goes to standard error rather than the current debug file, no sorting is done on multiple arguments, and an error is issued if any @var{name} is undefined. @code{m4_dumpdefs} is a convenience macro that calls @code{m4_dumpdef} for all of the @code{m4_pushdef} stack of definitions, starting with the current, and silently does nothing if @var{name} is undefined. Unfortunately, due to a limitation in M4 1.4.x, any macro defined as a builtin is output as the empty string. This behavior is rectified by using M4 1.6 or newer. However, this behavior difference means that @code{m4_dumpdef} should only be used while developing m4sugar macros, and never in the final published form of a macro. @end defmac @defmac m4_esyscmd_s (@var{command}) @msindex{esyscmd_s} Like @code{m4_esyscmd}, this macro expands to the result of running @var{command} in a shell. The difference is that any trailing newlines are removed, so that the output behaves more like shell command substitution. @end defmac @defmac m4_exit (@var{exit-status}) @msindex{exit} This macro corresponds to @code{m4exit}. 
@end defmac @defmac m4_if (@var{comment}) @defmacx m4_if (@var{string-1}, @var{string-2}, @var{equal}, @ovar{not-equal}) @defmacx m4_if (@var{string-1}, @var{string-2}, @var{equal-1}, @ @var{string-3}, @var{string-4}, @var{equal-2}, @dots{}, @ovar{not-equal}) @msindex{if} This macro corresponds to @code{ifelse}. @var{string-1} and @var{string-2} are compared literally, so usually one of the two arguments is passed unquoted. @xref{Conditional constructs}, for more conditional idioms. @end defmac @defmac m4_include (@var{file}) @defmacx m4_sinclude (@var{file}) @msindex{include} @msindex{sinclude} Like the M4 builtins, but warn against multiple inclusions of @var{file}. @end defmac @defmac m4_mkstemp (@var{template}) @defmacx m4_maketemp (@var{template}) @msindex{maketemp} @msindex{mkstemp} Posix requires @code{maketemp} to replace the trailing @samp{X} characters in @var{template} with the process id, without regard to the existence of a file by that name, but this is a security hole. When this was pointed out to the Posix folks, they agreed to invent a new macro @code{mkstemp} that always creates a uniquely named file, but not all versions of GNU M4 support the new macro. In M4sugar, @code{m4_maketemp} and @code{m4_mkstemp} are synonyms for each other, and both have the secure semantics regardless of which macro the underlying M4 provides. @end defmac @defmac m4_popdef (@var{macro}@dots{}) @msindex{popdef} This macro fails if @var{macro} is not defined, even when using older versions of M4 that did not warn. See @code{m4_undefine}. @end defmac @defmac m4_undefine (@var{macro}@dots{}) @msindex{undefine} This macro fails if @var{macro} is not defined, even when using older versions of M4 that did not warn. Use @example m4_ifdef([@var{macro}], [m4_undefine([@var{macro}])]) @end example @noindent if you are not sure whether @var{macro} is defined. 
@end defmac @defmac m4_undivert (@var{diversion}@dots{}) @msindex{undivert} Unlike the M4 builtin, at least one @var{diversion} must be specified. Also, since the M4sugar diversion stack prefers named diversions, the use of @code{m4_undivert} to include files is risky. @xref{Diversion support}, for more details about the use of the diversion stack. But be aware that it is seldom necessary to explicitly change the diversion stack, and that when done incorrectly, it can lead to syntactically invalid scripts. @end defmac @defmac m4_wrap (@var{text}) @defmacx m4_wrap_lifo (@var{text}) @msindex{wrap} @msindex{wrap_lifo} These macros correspond to @code{m4wrap}. Posix requires arguments of multiple wrap calls to be reprocessed at EOF in the same order as the original calls (first-in, first-out). GNU M4 versions through 1.4.10, however, reprocess them in reverse order (last-in, first-out). Both orders are useful, therefore, you can rely on @code{m4_wrap} to provide FIFO semantics and @code{m4_wrap_lifo} for LIFO semantics, regardless of the underlying GNU M4 version. Unlike the GNU M4 builtin, these macros only recognize one argument, and avoid token pasting between consecutive invocations. On the other hand, nested calls to @code{m4_wrap} from within wrapped text work just as in the builtin. @end defmac @node Diagnostic Macros @subsection Diagnostic messages from M4sugar @cindex Messages, from @command{M4sugar} When macros statically diagnose abnormal situations, benign or fatal, they should report them using these macros. For issuing dynamic issues, i.e., when @command{configure} is run, see @ref{Printing Messages}. @defmac m4_assert (@var{expression}, @dvar{exit-status, 1}) @msindex{assert} Assert that the arithmetic @var{expression} evaluates to non-zero. Otherwise, issue a fatal error, and exit @command{autom4te} with @var{exit-status}. 
@end defmac @defmac m4_errprintn (@var{message}) @msindex{errprintn} Similar to the builtin @code{m4_errprint}, except that a newline is guaranteed after @var{message}. @end defmac @anchor{m4_fatal} @defmac m4_fatal (@var{message}) @msindex{fatal} Report a severe error @var{message} prefixed with the current location, and have @command{autom4te} die. @end defmac @defmac m4_location @msindex{location} Useful as a prefix in a message line. Short for: @example __file__:__line__ @end example @end defmac @anchor{m4_warn} @defmac m4_warn (@var{category}, @var{message}) @msindex{warn} Report @var{message} as a warning (or as an error if requested by the user) if warnings of the @var{category} are turned on. If the message is emitted, it is prefixed with the current location, and followed by a call trace of all macros defined via @code{AC_DEFUN} used to get to the current expansion. The @var{category} must be one of: @table @samp @item cross Warnings about constructs that may interfere with cross-compilation, such as using @code{AC_RUN_IFELSE} without a default. @item gnu Warnings related to the GNU Coding Standards (@pxref{Top,,, standards, The GNU Coding Standards}). On by default. @item obsolete Warnings about obsolete features. On by default. @item override Warnings about redefinitions of Autoconf internals. @item portability Warnings about non-portable constructs. @item portability-recursive Warnings about recursive Make variable expansions (@code{$(foo$(x))}). @item extra-portability Extra warnings about non-portable constructs, covering rarely-used tools. @item syntax Warnings about questionable syntactic constructs, incorrectly ordered macro calls, typos, etc. On by default. @item unsupported Warnings about unsupported features. On by default. @end table @strong{Hacking Note:} The set of categories is defined by code in @command{autom4te}, not by M4sugar itself. Additions should be coordinated with Automake, so that both sets of tools accept the same options. 
@end defmac @node Diversion support @subsection Diversion support M4sugar makes heavy use of diversions under the hood, because it is often the case that text that must appear early in the output is not discovered until late in the input. Additionally, some of the topological sorting algorithms used in resolving macro dependencies use diversions. However, most macros should not need to change diversions directly, but rather rely on higher-level M4sugar macros to manage diversions transparently. If you change diversions improperly, you risk generating a syntactically invalid script, because an incorrect diversion will violate assumptions made by many macros about whether prerequisite text has been previously output. In short, if you manually change the diversion, you should not expect any macros provided by the Autoconf package to work until you have restored the diversion stack back to its original state. In the rare case that it is necessary to write a macro that explicitly outputs text to a different diversion, it is important to be aware of an M4 limitation regarding diversions: text only goes to a diversion if it is not part of argument collection. Therefore, any macro that changes the current diversion cannot be used as an unquoted argument to another macro, but must be expanded at the top level. The macro @code{m4_expand} will diagnose any attempt to change diversions, since it is generally useful only as an argument to another macro. The following example shows what happens when diversion manipulation is attempted within macro arguments: @example m4_do([normal text] m4_divert_push([KILL])unwanted[]m4_divert_pop([KILL]) [m4_divert_push([KILL])discarded[]m4_divert_pop([KILL])])dnl @result{}normal text @result{}unwanted @end example @noindent Notice that the unquoted text @code{unwanted} is output, even though it was processed while the current diversion was @code{KILL}, because it was collected as part of the argument to @code{m4_do}. 
However, the text @code{discarded} disappeared as desired, because the diversion changes were single-quoted, and were not expanded until the top-level rescan of the output of @code{m4_do}. To make diversion management easier, M4sugar uses the concept of named diversions. Rather than using diversion numbers directly, it is nicer to associate a name with each diversion. The diversion number associated with a particular diversion name is an implementation detail, and a syntax warning is issued if a diversion number is used instead of a name. In general, you should not output text to a named diversion until after calling the appropriate initialization routine for your language (@code{m4_init}, @code{AS_INIT}, @code{AT_INIT}, @dots{}), although there are some exceptions documented below. M4sugar defines two named diversions. @table @code @item KILL Text written to this diversion is discarded. This is the default diversion once M4sugar is initialized. @item GROW This diversion is used behind the scenes by topological sorting macros, such as @code{AC_REQUIRE}. @end table M4sh adds several more named diversions. @table @code @item BINSH This diversion is reserved for the @samp{#!} interpreter line. @item HEADER-REVISION This diversion holds text from @code{AC_REVISION}. @item HEADER-COMMENT This diversion holds comments about the purpose of a file. @item HEADER-COPYRIGHT This diversion is managed by @code{AC_COPYRIGHT}. @item M4SH-SANITIZE This diversion contains M4sh sanitization code, used to ensure M4sh is executing in a reasonable shell environment. @item M4SH-INIT This diversion contains M4sh initialization code, initializing variables that are required by other M4sh macros. @item BODY This diversion contains the body of the shell code, and is the default diversion once M4sh is initialized. @end table Autotest inherits diversions from M4sh, and changes the default diversion from @code{BODY} back to @code{KILL}. 
It also adds several more named diversions, with the following subset designed for developer use. @table @code @item PREPARE_TESTS This diversion contains initialization sequences which are executed after @file{atconfig} and @file{atlocal}, and after all command line arguments have been parsed, but prior to running any tests. It can be used to set up state that is required across all tests. This diversion will work even before @code{AT_INIT}. @end table Autoconf inherits diversions from M4sh, and adds the following named diversions which developers can utilize. @table @code @item DEFAULTS This diversion contains shell variable assignments to set defaults that must be in place before arguments are parsed. This diversion is placed early enough in @file{configure} that it is unsafe to expand any autoconf macros into this diversion. @item HELP_ENABLE If @code{AC_PRESERVE_HELP_ORDER} was used, then text placed in this diversion will be included as part of a quoted here-doc providing all of the @option{--help} output of @file{configure} related to options created by @code{AC_ARG_WITH} and @code{AC_ARG_ENABLE}. @item INIT_PREPARE This diversion occurs after all command line options have been parsed, but prior to the main body of the @file{configure} script. This diversion is the last chance to insert shell code such as variable assignments or shell function declarations that will be used by the expansion of other macros. @end table For now, the remaining named diversions of Autoconf, Autoheader, and Autotest are not documented. In other words, intentionally outputting text into an undocumented diversion is subject to breakage in a future release of Autoconf. @defmac m4_cleardivert (@var{diversion}@dots{}) @msindex{cleardivert} Permanently discard any text that has been diverted into @var{diversion}. 
@end defmac @defmac m4_divert_once (@var{diversion}, @ovar{content}) @msindex{divert_once} Similar to @code{m4_divert_text}, except that @var{content} is only output to @var{diversion} if this is the first time that @code{m4_divert_once} has been called with its particular arguments. @end defmac @defmac m4_divert_pop (@ovar{diversion}) @msindex{divert_pop} If provided, check that the current diversion is indeed @var{diversion}. Then change to the diversion located earlier on the stack, giving an error if an attempt is made to pop beyond the initial m4sugar diversion of @code{KILL}. @end defmac @defmac m4_divert_push (@var{diversion}) @msindex{divert_push} Remember the former diversion on the diversion stack, and output subsequent text into @var{diversion}. M4sugar maintains a diversion stack, and issues an error if there is not a matching pop for every push. @end defmac @anchor{m4_divert_text} @defmac m4_divert_text (@var{diversion}, @ovar{content}) @msindex{divert_text} Output @var{content} and a newline into @var{diversion}, without affecting the current diversion. Shorthand for: @example m4_divert_push([@var{diversion}])@var{content} m4_divert_pop([@var{diversion}])dnl @end example One use of @code{m4_divert_text} is to develop two related macros, where macro @samp{MY_A} does the work, but adjusts what work is performed based on whether the optional macro @samp{MY_B} has also been expanded. Of course, it is possible to use @code{AC_BEFORE} within @code{MY_A} to require that @samp{MY_B} occurs first, if it occurs at all. But this imposes an ordering restriction on the user; it would be nicer if macros @samp{MY_A} and @samp{MY_B} can be invoked in either order. The trick is to let @samp{MY_B} leave a breadcrumb in an early diversion, which @samp{MY_A} can then use to determine whether @samp{MY_B} has been expanded. 
@example AC_DEFUN([MY_A], [# various actions if test -n "$b_was_used"; then # extra action fi]) AC_DEFUN([MY_B], [AC_REQUIRE([MY_A])dnl m4_divert_text([INIT_PREPARE], [b_was_used=true])]) @end example @end defmac @defmac m4_init @msindex{init} Initialize the M4sugar environment, setting up the default named diversion to be @code{KILL}. @end defmac @node Conditional constructs @subsection Conditional constructs The following macros provide additional conditional constructs as convenience wrappers around @code{m4_if}. @defmac m4_bmatch (@var{string}, @var{regex-1}, @var{value-1}, @ @ovar{regex-2}, @ovar{value-2}, @dots{}, @ovar{default}) @msindex{bmatch} The string @var{string} is repeatedly compared against a series of @var{regex} arguments; if a match is found, the expansion is the corresponding @var{value}, otherwise, the macro moves on to the next @var{regex}. If no @var{regex} match, then the result is the optional @var{default}, or nothing. @end defmac @defmac m4_bpatsubsts (@var{string}, @var{regex-1}, @var{subst-1}, @ @ovar{regex-2}, @ovar{subst-2}, @dots{}) @msindex{bpatsubsts} The string @var{string} is altered by @var{regex-1} and @var{subst-1}, as if by: @example m4_bpatsubst([[@var{string}]], [@var{regex}], [@var{subst}]) @end example @noindent The result of the substitution is then passed through the next set of @var{regex} and @var{subst}, and so forth. An empty @var{subst} implies deletion of any matched portions in the current string. Note that this macro over-quotes @var{string}; this behavior is intentional, so that the result of each step of the recursion remains as a quoted string. However, it means that anchors (@samp{^} and @samp{$}) in the @var{regex} will line up with the extra quotations, and not the characters of the original string. The overquoting is removed after the final substitution. 
@end defmac @defmac m4_case (@var{string}, @var{value-1}, @var{if-value-1}, @ @ovar{value-2}, @ovar{if-value-2}, @dots{}, @ovar{default}) @msindex{case} Test @var{string} against multiple @var{value} possibilities, resulting in the first @var{if-value} for a match, or in the optional @var{default}. This is shorthand for: @example m4_if([@var{string}], [@var{value-1}], [@var{if-value-1}], [@var{string}], [@var{value-2}], [@var{if-value-2}], @dots{}, [@var{default}]) @end example @end defmac @defmac m4_cond (@var{test-1}, @var{value-1}, @var{if-value-1}, @ @ovar{test-2}, @ovar{value-2}, @ovar{if-value-2}, @dots{}, @ovar{default}) @msindex{cond} This macro was introduced in Autoconf 2.62. Similar to @code{m4_if}, except that each @var{test} is expanded only when it is encountered. This is useful for short-circuiting expensive tests; while @code{m4_if} requires all its strings to be expanded up front before doing comparisons, @code{m4_cond} only expands a @var{test} when all earlier tests have failed. For an example, these two sequences give the same result, but in the case where @samp{$1} does not contain a backslash, the @code{m4_cond} version only expands @code{m4_index} once, instead of five times, for faster computation if this is a common case for @samp{$1}. 
Notice that every third argument is unquoted for @code{m4_if}, and quoted for @code{m4_cond}: @example m4_if(m4_index([$1], [\]), [-1], [$2], m4_eval(m4_index([$1], [\\]) >= 0), [1], [$2], m4_eval(m4_index([$1], [\$]) >= 0), [1], [$2], m4_eval(m4_index([$1], [\`]) >= 0), [1], [$3], m4_eval(m4_index([$1], [\"]) >= 0), [1], [$3], [$2]) m4_cond([m4_index([$1], [\])], [-1], [$2], [m4_eval(m4_index([$1], [\\]) >= 0)], [1], [$2], [m4_eval(m4_index([$1], [\$]) >= 0)], [1], [$2], [m4_eval(m4_index([$1], [\`]) >= 0)], [1], [$3], [m4_eval(m4_index([$1], [\"]) >= 0)], [1], [$3], [$2]) @end example @end defmac @defmac m4_default (@var{expr-1}, @var{expr-2}) @defmacx m4_default_quoted (@var{expr-1}, @var{expr-2}) @defmacx m4_default_nblank (@var{expr-1}, @ovar{expr-2}) @defmacx m4_default_nblank_quoted (@var{expr-1}, @ovar{expr-2}) @msindex{default} @msindex{default_quoted} @msindex{default_nblank} @msindex{default_nblank_quoted} If @var{expr-1} contains text, use it. Otherwise, select @var{expr-2}. @code{m4_default} expands the result, while @code{m4_default_quoted} does not. Useful for providing a fixed default if the expression that results in @var{expr-1} would otherwise be empty. The difference between @code{m4_default} and @code{m4_default_nblank} is whether an argument consisting of just blanks (space, tab, newline) is significant. When using the expanding versions, note that an argument may contain text but still expand to an empty string. 
@example m4_define([active], [ACTIVE])dnl m4_define([empty], [])dnl m4_define([demo1], [m4_default([$1], [$2])])dnl m4_define([demo2], [m4_default_quoted([$1], [$2])])dnl m4_define([demo3], [m4_default_nblank([$1], [$2])])dnl m4_define([demo4], [m4_default_nblank_quoted([$1], [$2])])dnl demo1([active], [default]) @result{}ACTIVE demo1([], [active]) @result{}ACTIVE demo1([empty], [text]) @result{} -demo1([ ], [active])- @result{}- - demo2([active], [default]) @result{}active demo2([], [active]) @result{}active demo2([empty], [text]) @result{}empty -demo2([ ], [active])- @result{}- - demo3([active], [default]) @result{}ACTIVE demo3([], [active]) @result{}ACTIVE demo3([empty], [text]) @result{} -demo3([ ], [active])- @result{}-ACTIVE- demo4([active], [default]) @result{}active demo4([], [active]) @result{}active demo4([empty], [text]) @result{}empty -demo4([ ], [active])- @result{}-active- @end example @end defmac @defmac m4_define_default (@var{macro}, @ovar{default-definition}) @msindex{define_default} If @var{macro} does not already have a definition, then define it to @var{default-definition}. @end defmac @defmac m4_ifblank (@var{cond}, @ovar{if-blank}, @ovar{if-text}) @defmacx m4_ifnblank (@var{cond}, @ovar{if-text}, @ovar{if-blank}) @msindex{ifblank} @msindex{ifnblank} If @var{cond} is empty or consists only of blanks (space, tab, newline), then expand @var{if-blank}; otherwise, expand @var{if-text}. Two variants exist, in order to make it easier to select the correct logical sense when using only two parameters. 
Note that this is more efficient than the equivalent behavior of: @example m4_ifval(m4_normalize([@var{cond}]), @var{if-text}, @var{if-blank}) @end example @end defmac @defmac m4_ifndef (@var{macro}, @var{if-not-defined}, @ovar{if-defined}) @msindex{ifndef} This is shorthand for: @example m4_ifdef([@var{macro}], [@var{if-defined}], [@var{if-not-defined}]) @end example @end defmac @defmac m4_ifset (@var{macro}, @ovar{if-true}, @ovar{if-false}) @msindex{ifset} If @var{macro} is undefined, or is defined as the empty string, expand to @var{if-false}. Otherwise, expands to @var{if-true}. Similar to: @example m4_ifval(m4_defn([@var{macro}]), [@var{if-true}], [@var{if-false}]) @end example @noindent except that it is not an error if @var{macro} is undefined. @end defmac @defmac m4_ifval (@var{cond}, @ovar{if-true}, @ovar{if-false}) @msindex{ifval} Expands to @var{if-true} if @var{cond} is not empty, otherwise to @var{if-false}. This is shorthand for: @example m4_if([@var{cond}], [], [@var{if-false}], [@var{if-true}]) @end example @end defmac @defmac m4_ifvaln (@var{cond}, @ovar{if-true}, @ovar{if-false}) @msindex{ifvaln} Similar to @code{m4_ifval}, except guarantee that a newline is present after any non-empty expansion. Often followed by @code{dnl}. @end defmac @defmac m4_n (@var{text}) @msindex{n} Expand to @var{text}, and add a newline if @var{text} is not empty. Often followed by @code{dnl}. @end defmac @node Looping constructs @subsection Looping constructs The following macros are useful in implementing recursive algorithms in M4, including loop operations. An M4 list is formed by quoting a list of quoted elements; generally the lists are comma-separated, although @code{m4_foreach_w} is whitespace-separated. For example, the list @samp{[[a], [b,c]]} contains two elements: @samp{[a]} and @samp{[b,c]}. It is common to see lists with unquoted elements when those elements are not likely to be macro names, as in @samp{[fputc_unlocked, fgetc_unlocked]}. 
Although not generally recommended, it is possible for quoted lists to have side effects; all side effects are expanded only once, and prior to visiting any list element. On the other hand, the fact that unquoted macros are expanded exactly once means that macros without side effects can be used to generate lists. For example, @example m4_foreach([i], [[1], [2], [3]m4_errprintn([hi])], [i]) @error{}hi @result{}123 m4_define([list], [[1], [2], [3]]) @result{} m4_foreach([i], [list], [i]) @result{}123 @end example @defmac m4_argn (@var{n}, @ovar{arg}@dots{}) @msindex{argn} Extracts argument @var{n} (larger than 0) from the remaining arguments. If there are too few arguments, the empty string is used. For any @var{n} besides 1, this is more efficient than the similar @samp{m4_car(m4_shiftn([@var{n}], [], [@var{arg}@dots{}]))}. @end defmac @defmac m4_car (@var{arg}@dots{}) @msindex{car} Expands to the quoted first @var{arg}. Can be used with @code{m4_cdr} to recursively iterate through a list. Generally, when using quoted lists of quoted elements, @code{m4_car} should be called without any extra quotes. @end defmac @defmac m4_cdr (@var{arg}@dots{}) @msindex{cdr} Expands to a quoted list of all but the first @var{arg}, or the empty string if there was only one argument. Generally, when using quoted lists of quoted elements, @code{m4_cdr} should be called without any extra quotes. For example, this is a simple implementation of @code{m4_map}; note how each iteration checks for the end of recursion, then merely applies the first argument to the first element of the list, then repeats with the rest of the list. (The actual implementation in M4sugar is a bit more involved, to gain some speed and share code with @code{m4_map_sep}, and also to avoid expanding side effects in @samp{$2} twice). 
@example m4_define([m4_map], [m4_ifval([$2], [m4_apply([$1], m4_car($2))[]$0([$1], m4_cdr($2))])])dnl m4_map([ m4_eval], [[[1]], [[1+1]], [[10],[16]]]) @result{} 1 2 a @end example @end defmac @defmac m4_for (@var{var}, @var{first}, @var{last}, @ovar{step}, @ @var{expression}) @msindex{for} Loop over the numeric values between @var{first} and @var{last} including bounds by increments of @var{step}. For each iteration, expand @var{expression} with the numeric value assigned to @var{var}. If @var{step} is omitted, it defaults to @samp{1} or @samp{-1} depending on the order of the limits. If given, @var{step} has to match this order. The number of iterations is determined independently from definition of @var{var}; iteration cannot be short-circuited or lengthened by modifying @var{var} from within @var{expression}. @end defmac @defmac m4_foreach (@var{var}, @var{list}, @var{expression}) @msindex{foreach} Loop over the comma-separated M4 list @var{list}, assigning each value to @var{var}, and expand @var{expression}. The following example outputs two lines: @example m4_foreach([myvar], [[foo], [bar, baz]], [echo myvar ])dnl @result{}echo foo @result{}echo bar, baz @end example Note that for some forms of @var{expression}, it may be faster to use @code{m4_map_args}. @end defmac @anchor{m4_foreach_w} @defmac m4_foreach_w (@var{var}, @var{list}, @var{expression}) @msindex{foreach_w} Loop over the white-space-separated list @var{list}, assigning each value to @var{var}, and expand @var{expression}. If @var{var} is only referenced once in @var{expression}, it is more efficient to use @code{m4_map_args_w}. The deprecated macro @code{AC_FOREACH} is an alias of @code{m4_foreach_w}. 
@end defmac @defmac m4_map (@var{macro}, @var{list}) @defmacx m4_mapall (@var{macro}, @var{list}) @defmacx m4_map_sep (@var{macro}, @var{separator}, @var{list}) @defmacx m4_mapall_sep (@var{macro}, @var{separator}, @var{list}) @msindex{map} @msindex{mapall} @msindex{map_sep} @msindex{mapall_sep} Loop over the comma separated quoted list of argument descriptions in @var{list}, and invoke @var{macro} with the arguments. An argument description is in turn a comma-separated quoted list of quoted elements, suitable for @code{m4_apply}. The macros @code{m4_map} and @code{m4_map_sep} ignore empty argument descriptions, while @code{m4_mapall} and @code{m4_mapall_sep} invoke @var{macro} with no arguments. The macros @code{m4_map_sep} and @code{m4_mapall_sep} additionally expand @var{separator} between invocations of @var{macro}. Note that @var{separator} is expanded, unlike in @code{m4_join}. When separating output with commas, this means that the map result can be used as a series of arguments, by using a single-quoted comma as @var{separator}, or as a single string, by using a double-quoted comma. @example m4_map([m4_count], []) @result{} m4_map([ m4_count], [[], [[1]], [[1], [2]]]) @result{} 1 2 m4_mapall([ m4_count], [[], [[1]], [[1], [2]]]) @result{} 0 1 2 m4_map_sep([m4_eval], [,], [[[1+2]], [[10], [16]]]) @result{}3,a m4_map_sep([m4_echo], [,], [[[a]], [[b]]]) @result{}a,b m4_count(m4_map_sep([m4_echo], [,], [[[a]], [[b]]])) @result{}2 m4_map_sep([m4_echo], [[,]], [[[a]], [[b]]]) @result{}a,b m4_count(m4_map_sep([m4_echo], [[,]], [[[a]], [[b]]])) @result{}1 @end example @end defmac @defmac m4_map_args (@var{macro}, @var{arg}@dots{}) @msindex{map_args} Repeatedly invoke @var{macro} with each successive @var{arg} as its only argument. In the following example, three solutions are presented with the same expansion; the solution using @code{m4_map_args} is the most efficient. 
@example m4_define([active], [ACTIVE])dnl m4_foreach([var], [[plain], [active]], [ m4_echo(m4_defn([var]))]) @result{} plain active m4_map([ m4_echo], [[[plain]], [[active]]]) @result{} plain active m4_map_args([ m4_echo], [plain], [active]) @result{} plain active @end example In cases where it is useful to operate on additional parameters besides the list elements, the macro @code{m4_curry} can be used in @var{macro} to supply the argument currying necessary to generate the desired argument list. In the following example, @code{list_add_n} is more efficient than @code{list_add_x}. On the other hand, using @code{m4_map_args_sep} can be even more efficient. @example m4_define([list], [[1], [2], [3]])dnl m4_define([add], [m4_eval(([$1]) + ([$2]))])dnl dnl list_add_n(N, ARG...) dnl Output a list consisting of each ARG added to N m4_define([list_add_n], [m4_shift(m4_map_args([,m4_curry([add], [$1])], m4_shift($@@)))])dnl list_add_n([1], list) @result{}2,3,4 list_add_n([2], list) @result{}3,4,5 m4_define([list_add_x], [m4_shift(m4_foreach([var], m4_dquote(m4_shift($@@)), [,add([$1],m4_defn([var]))]))])dnl list_add_x([1], list) @result{}2,3,4 @end example @end defmac @defmac m4_map_args_pair (@var{macro}, @dvarv{macro-end, macro}, @ @var{arg}@dots{}) @msindex{map_args_pair} For every pair of arguments @var{arg}, invoke @var{macro} with two arguments. If there is an odd number of arguments, invoke @var{macro-end}, which defaults to @var{macro}, with the remaining argument. 
@example m4_map_args_pair([, m4_reverse], [], [1], [2], [3]) @result{}, 2, 1, 3 m4_map_args_pair([, m4_reverse], [, m4_dquote], [1], [2], [3]) @result{}, 2, 1, [3] m4_map_args_pair([, m4_reverse], [, m4_dquote], [1], [2], [3], [4]) @result{}, 2, 1, 4, 3 @end example @end defmac @defmac m4_map_args_sep (@ovar{pre}, @ovar{post}, @ovar{sep}, @var{arg}@dots{}) @msindex{map_args_sep} Expand the sequence @code{@var{pre}[@var{arg}]@var{post}} for each argument, additionally expanding @var{sep} between arguments. One common use of this macro is constructing a macro call, where the opening and closing parentheses are split between @var{pre} and @var{post}; in particular, @code{m4_map_args([@var{macro}], [@var{arg}])} is equivalent to @code{m4_map_args_sep([@var{macro}(], [)], [], [@var{arg}])}. This macro provides the most efficient means for iterating over an arbitrary list of arguments, particularly when repeatedly constructing a macro call with more arguments than @var{arg}. @end defmac @defmac m4_map_args_w (@var{string}, @ovar{pre}, @ovar{post}, @ovar{sep}) @msindex{map_args_w} Expand the sequence @code{@var{pre}[word]@var{post}} for each word in the whitespace-separated @var{string}, additionally expanding @var{sep} between words. This macro provides the most efficient means for iterating over a whitespace-separated string. In particular, @code{m4_map_args_w([@var{string}], [@var{action}(], [)])} is more efficient than @code{m4_foreach_w([var], [@var{string}], [@var{action}(m4_defn([var]))])}. @end defmac @defmac m4_shiftn (@var{count}, @dots{}) @defmacx m4_shift2 (@dots{}) @defmacx m4_shift3 (@dots{}) @msindex{shift2} @msindex{shift3} @msindex{shiftn} @code{m4_shiftn} performs @var{count} iterations of @code{m4_shift}, along with validation that enough arguments were passed in to match the shift count, and that the count is positive. 
@code{m4_shift2} and @code{m4_shift3} are specializations of @code{m4_shiftn}, introduced in Autoconf 2.62, and are more efficient for two and three shifts, respectively. @end defmac @defmac m4_stack_foreach (@var{macro}, @var{action}) @defmacx m4_stack_foreach_lifo (@var{macro}, @var{action}) @msindex{stack_foreach} @msindex{stack_foreach_lifo} For each of the @code{m4_pushdef} definitions of @var{macro}, expand @var{action} with the single argument of a definition of @var{macro}. @code{m4_stack_foreach} starts with the oldest definition, while @code{m4_stack_foreach_lifo} starts with the current definition. @var{action} should not push or pop definitions of @var{macro}, nor is there any guarantee that the current definition of @var{macro} matches the argument that was passed to @var{action}. The macro @code{m4_curry} can be used if @var{action} needs more than one argument, although in that case it is more efficient to use @var{m4_stack_foreach_sep}. Due to technical limitations, there are a few low-level m4sugar functions, such as @code{m4_pushdef}, that cannot be used as the @var{macro} argument. @example m4_pushdef([a], [1])m4_pushdef([a], [2])dnl m4_stack_foreach([a], [ m4_incr]) @result{} 2 3 m4_stack_foreach_lifo([a], [ m4_curry([m4_substr], [abcd])]) @result{} cd bcd @end example @end defmac @defmac m4_stack_foreach_sep (@var{macro}, @ovar{pre}, @ovar{post}, @ovar{sep}) @defmacx m4_stack_foreach_sep_lifo (@var{macro}, @ovar{pre}, @ovar{post}, @ @ovar{sep}) @msindex{stack_foreach_sep} @msindex{stack_foreach_sep_lifo} Expand the sequence @code{@var{pre}[definition]@var{post}} for each @code{m4_pushdef} definition of @var{macro}, additionally expanding @var{sep} between definitions. @code{m4_stack_foreach_sep} visits the oldest definition first, while @code{m4_stack_foreach_sep_lifo} visits the current definition first. This macro provides the most efficient means for iterating over a pushdef stack. 
In particular, @code{m4_stack_foreach([@var{macro}], [@var{action}])} is short for @code{m4_stack_foreach_sep([@var{macro}], [@var{action}(], [)])}. @end defmac @node Evaluation Macros @subsection Evaluation Macros The following macros give some control over the order of the evaluation by adding or removing levels of quotes. @defmac m4_apply (@var{macro}, @var{list}) @msindex{apply} Apply the elements of the quoted, comma-separated @var{list} as the arguments to @var{macro}. If @var{list} is empty, invoke @var{macro} without arguments. Note the difference between @code{m4_indir}, which expects its first argument to be a macro name but can use names that are otherwise invalid, and @code{m4_apply}, where @var{macro} can contain other text, but must end in a valid macro name. @example m4_apply([m4_count], []) @result{}0 m4_apply([m4_count], [[]]) @result{}1 m4_apply([m4_count], [[1], [2]]) @result{}2 m4_apply([m4_join], [[|], [1], [2]]) @result{}1|2 @end example @end defmac @defmac m4_count (@var{arg}, @dots{}) @msindex{count} This macro returns the decimal count of the number of arguments it was passed. @end defmac @defmac m4_curry (@var{macro}, @var{arg}@dots{}) @msindex{curry} This macro performs argument currying. The expansion of this macro is another macro name that expects exactly one argument; that argument is then appended to the @var{arg} list, and then @var{macro} is expanded with the resulting argument list. @example m4_curry([m4_curry], [m4_reverse], [1])([2])([3]) @result{}3, 2, 1 @end example Unfortunately, due to a limitation in M4 1.4.x, it is not possible to pass the definition of a builtin macro as the argument to the output of @code{m4_curry}; the empty string is used instead of the builtin token. This behavior is rectified by using M4 1.6 or newer. @end defmac @defmac m4_do (@var{arg}, @dots{}) @msindex{do} This macro loops over its arguments and expands each @var{arg} in sequence. 
Its main use is for readability; it allows the use of indentation and fewer @code{dnl} to result in the same expansion. This macro guarantees that no expansion will be concatenated with subsequent text; to achieve full concatenation, use @code{m4_unquote(m4_join([], @var{arg@dots{}}))}. @example m4_define([ab],[1])m4_define([bc],[2])m4_define([abc],[3])dnl m4_do([a],[b])c @result{}abc m4_unquote(m4_join([],[a],[b]))c @result{}3 m4_define([a],[A])m4_define([b],[B])m4_define([c],[C])dnl m4_define([AB],[4])m4_define([BC],[5])m4_define([ABC],[6])dnl m4_do([a],[b])c @result{}ABC m4_unquote(m4_join([],[a],[b]))c @result{}3 @end example @end defmac @defmac m4_dquote (@var{arg}, @dots{}) @msindex{dquote} Return the arguments as a quoted list of quoted arguments. Conveniently, if there is just one @var{arg}, this effectively adds a level of quoting. @end defmac @defmac m4_dquote_elt (@var{arg}, @dots{}) @msindex{dquote_elt} Return the arguments as a series of double-quoted arguments. Whereas @code{m4_dquote} returns a single argument, @code{m4_dquote_elt} returns as many arguments as it was passed. @end defmac @defmac m4_echo (@var{arg}, @dots{}) @msindex{echo} Return the arguments, with the same level of quoting. Other than discarding whitespace after unquoted commas, this macro is a no-op. @end defmac @defmac m4_expand (@var{arg}) @msindex{expand} Return the expansion of @var{arg} as a quoted string. Whereas @code{m4_quote} is designed to collect expanded text into a single argument, @code{m4_expand} is designed to perform one level of expansion on quoted text. One distinction is in the treatment of whitespace following a comma in the original @var{arg}. Any time multiple arguments are collected into one with @code{m4_quote}, the M4 argument collection rules discard the whitespace. However, with @code{m4_expand}, whitespace is preserved, even after the expansion of macros contained in @var{arg}. 
Additionally, @code{m4_expand} is able to expand text that would involve an unterminated comment, whereas expanding that same text as the argument to @code{m4_quote} runs into difficulty in finding the end of the argument. Since manipulating diversions during argument collection is inherently unsafe, @code{m4_expand} issues an error if @var{arg} attempts to change the current diversion (@pxref{Diversion support}). @example m4_define([active], [ACT, IVE])dnl m4_define([active2], [[ACT, IVE]])dnl m4_quote(active, active) @result{}ACT,IVE,ACT,IVE m4_expand([active, active]) @result{}ACT, IVE, ACT, IVE m4_quote(active2, active2) @result{}ACT, IVE,ACT, IVE m4_expand([active2, active2]) @result{}ACT, IVE, ACT, IVE m4_expand([# m4_echo]) @result{}# m4_echo m4_quote(# m4_echo) ) @result{}# m4_echo) @result{} @end example Note that @code{m4_expand} cannot handle an @var{arg} that expands to literal unbalanced quotes, but that quadrigraphs can be used when unbalanced output is necessary. Likewise, unbalanced parentheses should be supplied with double quoting or a quadrigraph. @example m4_define([pattern], [[!@@<:@@]])dnl m4_define([bar], [BAR])dnl m4_expand([case $foo in m4_defn([pattern])@@:@}@@ bar ;; *[)] blah ;; esac]) @result{}case $foo in @result{} [![]) BAR ;; @result{} *) blah ;; @result{}esac @end example @end defmac @defmac m4_ignore (@dots{}) @msindex{ignore} This macro was introduced in Autoconf 2.62. Expands to nothing, ignoring all of its arguments. By itself, this isn't very useful. However, it can be used to conditionally ignore an arbitrary number of arguments, by deciding which macro name to apply to a list of arguments. @example dnl foo outputs a message only if [debug] is defined. m4_define([foo], [m4_ifdef([debug],[AC_MSG_NOTICE],[m4_ignore])([debug message])]) @end example Note that for earlier versions of Autoconf, the macro @code{__gnu__} can serve the same purpose, although it is less readable. 
@end defmac @defmac m4_make_list (@var{arg}, @dots{}) @msindex{make_list} This macro exists to aid debugging of M4sugar algorithms. Its net effect is similar to @code{m4_dquote}---it produces a quoted list of quoted arguments, for each @var{arg}. The difference is that this version uses a comma-newline separator instead of just comma, to improve readability of the list; with the result that it is less efficient than @code{m4_dquote}. @example m4_define([zero],[0])m4_define([one],[1])m4_define([two],[2])dnl m4_dquote(zero, [one], [[two]]) @result{}[0],[one],[[two]] m4_make_list(zero, [one], [[two]]) @result{}[0], @result{}[one], @result{}[[two]] m4_foreach([number], m4_dquote(zero, [one], [[two]]), [ number]) @result{} 0 1 two m4_foreach([number], m4_make_list(zero, [one], [[two]]), [ number]) @result{} 0 1 two @end example @end defmac @c m4_noquote is too dangerous to document - it invokes macros that @c probably rely on @samp{[]} nested quoting for proper operation. The @c user should generally prefer m4_unquote instead. @defmac m4_quote (@var{arg}, @dots{}) @msindex{quote} Return the arguments as a single entity, i.e., wrap them into a pair of quotes. This effectively collapses multiple arguments into one, although it loses whitespace after unquoted commas in the process. @end defmac @defmac m4_reverse (@var{arg}, @dots{}) @msindex{reverse} Outputs each argument with the same level of quoting, but in reverse order, and with space following each comma for readability. @example m4_define([active], [ACT,IVE]) @result{} m4_reverse(active, [active]) @result{}active, IVE, ACT @end example @end defmac @defmac m4_unquote (@var{arg}, @dots{}) @msindex{unquote} This macro was introduced in Autoconf 2.62. Expand each argument, separated by commas. For a single @var{arg}, this effectively removes a layer of quoting, and @code{m4_unquote([@var{arg}])} is more efficient than the equivalent @code{m4_do([@var{arg}])}. 
For multiple arguments, this results in an unquoted list of expansions. This is commonly used with @code{m4_split}, in order to convert a single quoted list into a series of quoted elements. @end defmac The following example aims at emphasizing the difference between several scenarios: not using these macros, using @code{m4_defn}, using @code{m4_quote}, using @code{m4_dquote}, and using @code{m4_expand}. @example $ @kbd{cat example.m4} dnl Overquote, so that quotes are visible. m4_define([show], [$[]1 = [$1], $[]@@ = [$@@]]) m4_define([a], [A]) m4_define([mkargs], [1, 2[,] 3]) m4_define([arg1], [[$1]]) m4_divert([0])dnl show(a, b) show([a, b]) show(m4_quote(a, b)) show(m4_dquote(a, b)) show(m4_expand([a, b])) arg1(mkargs) arg1([mkargs]) arg1(m4_defn([mkargs])) arg1(m4_quote(mkargs)) arg1(m4_dquote(mkargs)) arg1(m4_expand([mkargs])) $ @kbd{autom4te -l m4sugar example.m4} $1 = A, $@@ = [A],[b] $1 = a, b, $@@ = [a, b] $1 = A,b, $@@ = [A,b] $1 = [A],[b], $@@ = [[A],[b]] $1 = A, b, $@@ = [A, b] 1 mkargs 1, 2[,] 3 1,2, 3 [1],[2, 3] 1, 2, 3 @end example @node Text processing Macros @subsection String manipulation in M4 The following macros may be used to manipulate strings in M4. Many of the macros in this section intentionally result in quoted strings as output, rather than subjecting the arguments to further expansions. As a result, if you are manipulating text that contains active M4 characters, the arguments are passed with single quoting rather than double. @defmac m4_append (@var{macro-name}, @var{string}, @ovar{separator}) @defmacx m4_append_uniq (@var{macro-name}, @var{string}, @ovar{separator}, @ @ovar{if-uniq}, @ovar{if-duplicate}) @msindex{append} @msindex{append_uniq} Redefine @var{macro-name} to its former contents with @var{separator} and @var{string} added at the end. If @var{macro-name} was undefined before (but not if it was defined but empty), then no @var{separator} is added. 
As of Autoconf 2.62, neither @var{string} nor @var{separator} are expanded during this macro; instead, they are expanded when @var{macro-name} is invoked. @code{m4_append} can be used to grow strings, and @code{m4_append_uniq} to grow strings without duplicating substrings. Additionally, @code{m4_append_uniq} takes two optional parameters as of Autoconf 2.62; @var{if-uniq} is expanded if @var{string} was appended, and @var{if-duplicate} is expanded if @var{string} was already present. Also, @code{m4_append_uniq} warns if @var{separator} is not empty, but occurs within @var{string}, since that can lead to duplicates. Note that @code{m4_append} can scale linearly in the length of the final string, depending on the quality of the underlying M4 implementation, while @code{m4_append_uniq} has an inherent quadratic scaling factor. If an algorithm can tolerate duplicates in the final string, use the former for speed. If duplicates must be avoided, consider using @code{m4_set_add} instead (@pxref{Set manipulation Macros}). @example m4_define([active], [ACTIVE])dnl m4_append([sentence], [This is an])dnl m4_append([sentence], [ active ])dnl m4_append([sentence], [symbol.])dnl sentence @result{}This is an ACTIVE symbol. m4_undefine([active])dnl sentence @result{}This is an active symbol. 
m4_append_uniq([list], [one], [, ], [new], [existing]) @result{}new m4_append_uniq([list], [one], [, ], [new], [existing]) @result{}existing m4_append_uniq([list], [two], [, ], [new], [existing]) @result{}new m4_append_uniq([list], [three], [, ], [new], [existing]) @result{}new m4_append_uniq([list], [two], [, ], [new], [existing]) @result{}existing list @result{}one, two, three m4_dquote(list) @result{}[one],[two],[three] m4_append([list2], [one], [[, ]])dnl m4_append_uniq([list2], [two], [[, ]])dnl m4_append([list2], [three], [[, ]])dnl list2 @result{}one, two, three m4_dquote(list2) @result{}[one, two, three] @end example @end defmac @defmac m4_append_uniq_w (@var{macro-name}, @var{strings}) @msindex{append_uniq_w} This macro was introduced in Autoconf 2.62. It is similar to @code{m4_append_uniq}, but treats @var{strings} as a whitespace separated list of words to append, and only appends unique words. @var{macro-name} is updated with a single space between new words. @example m4_append_uniq_w([numbers], [1 1 2])dnl m4_append_uniq_w([numbers], [ 2 3 ])dnl numbers @result{}1 2 3 @end example @end defmac @defmac m4_chomp (@var{string}) @defmacx m4_chomp_all (@var{string}) @msindex{chomp} @msindex{chomp_all} Output @var{string} in quotes, but without a trailing newline. The macro @code{m4_chomp} is slightly faster, and removes at most one newline; the macro @code{m4_chomp_all} removes all consecutive trailing newlines. Unlike @code{m4_flatten}, embedded newlines are left intact, and backslash does not influence the result. @end defmac @defmac m4_combine (@ovar{separator}, @var{prefix-list}, @ovar{infix}, @ @var{suffix-1}, @ovar{suffix-2}, @dots{}) @msindex{combine} This macro produces a quoted string containing the pairwise combination of every element of the quoted, comma-separated @var{prefix-list}, and every element from the @var{suffix} arguments. 
Each pairwise combination is joined with @var{infix} in the middle, and successive pairs are joined by @var{separator}. No expansion occurs on any of the arguments. No output occurs if either the @var{prefix} or @var{suffix} list is empty, but the lists can contain empty elements. @example m4_define([a], [oops])dnl m4_combine([, ], [[a], [b], [c]], [-], [1], [2], [3]) @result{}a-1, a-2, a-3, b-1, b-2, b-3, c-1, c-2, c-3 m4_combine([, ], [[a], [b]], [-]) @result{} m4_combine([, ], [[a], [b]], [-], []) @result{}a-, b- m4_combine([, ], [], [-], [1], [2]) @result{} m4_combine([, ], [[]], [-], [1], [2]) @result{}-1, -2 @end example @end defmac @defmac m4_escape (@var{string}) @msindex{escape} Convert all instances of @samp{[}, @samp{]}, @samp{#}, and @samp{$} within @var{string} into their respective quadrigraphs. The result is still a quoted string. @end defmac @defmac m4_flatten (@var{string}) @msindex{flatten} Flatten @var{string} into a single line. Delete all backslash-newline pairs, and replace all remaining newlines with a space. The result is still a quoted string. @end defmac @defmac m4_join (@ovar{separator}, @var{args}@dots{}) @defmacx m4_joinall (@ovar{separator}, @var{args}@dots{}) @msindex{join} @msindex{joinall} Concatenate each @var{arg}, separated by @var{separator}. @code{joinall} uses every argument, while @code{join} omits empty arguments so that there are no back-to-back separators in the output. The result is a quoted string. @example m4_define([active], [ACTIVE])dnl m4_join([|], [one], [], [active], [two]) @result{}one|active|two m4_joinall([|], [one], [], [active], [two]) @result{}one||active|two @end example Note that if all you intend to do is join @var{args} with commas between them, to form a quoted list suitable for @code{m4_foreach}, it is more efficient to use @code{m4_dquote}. @end defmac @defmac m4_newline (@ovar{text}) @msindex{newline} This macro was introduced in Autoconf 2.62, and expands to a newline, followed by any @var{text}. 
It is primarily useful for maintaining macro formatting, and ensuring that M4 does not discard leading whitespace during argument collection. @end defmac @defmac m4_normalize (@var{string}) @msindex{normalize} Remove leading and trailing spaces and tabs, sequences of backslash-then-newline, and replace multiple spaces, tabs, and newlines with a single space. This is a combination of @code{m4_flatten} and @code{m4_strip}. To determine if @var{string} consists only of bytes that would be removed by @code{m4_normalize}, you can use @code{m4_ifblank}. @end defmac @defmac m4_re_escape (@var{string}) @msindex{re_escape} Backslash-escape all characters in @var{string} that are active in regexps. @end defmac @c We cannot use @dvar because the macro expansion mistreats backslashes. @defmac m4_split (@var{string}, @r{[}@var{regexp} = @samp{[\t ]+}@r{]}) @msindex{split} Split @var{string} into an M4 list of elements quoted by @samp{[} and @samp{]}, while keeping white space at the beginning and at the end. If @var{regexp} is given, use it instead of @samp{[\t ]+} for splitting. If @var{string} is empty, the result is an empty list. @end defmac @defmac m4_strip (@var{string}) @msindex{strip} Strip whitespace from @var{string}. Sequences of spaces and tabs are reduced to a single space, then leading and trailing spaces are removed. The result is still a quoted string. Note that this does not interfere with newlines; if you want newlines stripped as well, consider @code{m4_flatten}, or do it all at once with @code{m4_normalize}. To quickly test if @var{string} has only whitespace, use @code{m4_ifblank}. @end defmac @defmac m4_text_box (@var{message}, @dvar{frame, -}) @msindex{text_box} Add a text box around @var{message}, using @var{frame} as the border character above and below the message. The @var{frame} argument must be a single byte, and does not support quadrigraphs. The frame correctly accounts for the subsequent expansion of @var{message}. 
For example: @example m4_define([macro], [abc])dnl m4_text_box([macro]) @result{}## --- ## @result{}## abc ## @result{}## --- ## @end example The @var{message} must contain balanced quotes and parentheses, although quadrigraphs can be used to work around this. @end defmac @defmac m4_text_wrap (@var{string}, @ovar{prefix}, @ @dvarv{prefix1, prefix}, @dvar{width, 79}) @msindex{text_wrap} Break @var{string} into a series of whitespace-separated words, then output those words separated by spaces, and wrapping lines any time the output would exceed @var{width} columns. If given, @var{prefix1} begins the first line, and @var{prefix} begins all wrapped lines. If @var{prefix1} is longer than @var{prefix}, then the first line consists of just @var{prefix1}. If @var{prefix} is longer than @var{prefix1}, padding is inserted so that the first word of @var{string} begins at the same indentation as all wrapped lines. Note that using literal tab characters in any of the arguments will interfere with the calculation of width. No expansions occur on @var{prefix}, @var{prefix1}, or the words of @var{string}, although quadrigraphs are recognized. For some examples: @example m4_text_wrap([Short string */], [ ], [/* ], [20]) @result{}/* Short string */ m4_text_wrap([Much longer string */], [ ], [/* ], [20]) @result{}/* Much longer @result{} string */ m4_text_wrap([Short doc.], [ ], [ --short ], [30]) @result{} --short Short doc. m4_text_wrap([Short doc.], [ ], [ --too-wide ], [30]) @result{} --too-wide @result{} Short doc. m4_text_wrap([Super long documentation.], [ ], [ --too-wide ], 30) @result{} --too-wide @result{} Super long @result{} documentation. @end example @end defmac @defmac m4_tolower (@var{string}) @defmacx m4_toupper (@var{string}) @msindex{tolower} @msindex{toupper} Return @var{string} with letters converted to upper or lower case, respectively. 
@end defmac @node Number processing Macros @subsection Arithmetic computation in M4 The following macros facilitate integer arithmetic operations. Where a parameter is documented as taking an arithmetic expression, you can use anything that can be parsed by @code{m4_eval}. @defmac m4_cmp (@var{expr-1}, @var{expr-2}) @msindex{cmp} Compare the arithmetic expressions @var{expr-1} and @var{expr-2}, and expand to @samp{-1} if @var{expr-1} is smaller, @samp{0} if they are equal, and @samp{1} if @var{expr-1} is larger. @end defmac @defmac m4_list_cmp (@var{list-1}, @var{list-2}) @msindex{list_cmp} Compare the two M4 lists consisting of comma-separated arithmetic expressions, left to right. Expand to @samp{-1} for the first element pairing where the value from @var{list-1} is smaller, @samp{1} where the value from @var{list-2} is smaller, or @samp{0} if both lists have the same values. If one list is shorter than the other, the remaining elements of the longer list are compared against zero. @example m4_list_cmp([1, 0], [1]) @result{}0 m4_list_cmp([1, [1 * 0]], [1, 0]) @result{}0 m4_list_cmp([1, 2], [1, 0]) @result{}1 m4_list_cmp([1, [1+1], 3],[1, 2]) @result{}1 m4_list_cmp([1, 2, -3], [1, 2]) @result{}-1 m4_list_cmp([1, 0], [1, 2]) @result{}-1 m4_list_cmp([1], [1, 2]) @result{}-1 @end example @end defmac @defmac m4_max (@var{arg}, @dots{}) @msindex{max} This macro was introduced in Autoconf 2.62. Expand to the decimal value of the maximum arithmetic expression among all the arguments. @end defmac @defmac m4_min (@var{arg}, @dots{}) @msindex{min} This macro was introduced in Autoconf 2.62. Expand to the decimal value of the minimum arithmetic expression among all the arguments. @end defmac @defmac m4_sign (@var{expr}) @msindex{sign} Expand to @samp{-1} if the arithmetic expression @var{expr} is negative, @samp{1} if it is positive, and @samp{0} if it is zero. 
@end defmac @anchor{m4_version_compare} @defmac m4_version_compare (@var{version-1}, @var{version-2}) @msindex{version_compare} This macro was introduced in Autoconf 2.53, but had a number of usability limitations that were not lifted until Autoconf 2.62. Compare the version strings @var{version-1} and @var{version-2}, and expand to @samp{-1} if @var{version-1} is smaller, @samp{0} if they are the same, or @samp{1} @var{version-2} is smaller. Version strings must be a list of elements separated by @samp{.}, @samp{,} or @samp{-}, where each element is a number along with optional case-insensitive letters designating beta releases. The comparison stops at the leftmost element that contains a difference, although a 0 element compares equal to a missing element. It is permissible to include commit identifiers in @var{version}, such as an abbreviated SHA1 of the commit, provided there is still a monotonically increasing prefix to allow for accurate version-based comparisons. For example, this paragraph was written when the development snapshot of autoconf claimed to be at version @samp{2.61a-248-dc51}, or 248 commits after the 2.61a release, with an abbreviated commit identification of @samp{dc51}. @example m4_version_compare([1.1], [2.0]) @result{}-1 m4_version_compare([2.0b], [2.0a]) @result{}1 m4_version_compare([1.1.1], [1.1.1a]) @result{}-1 m4_version_compare([1.2], [1.1.1a]) @result{}1 m4_version_compare([1.0], [1]) @result{}0 m4_version_compare([1.1pre], [1.1PRE]) @result{}0 m4_version_compare([1.1a], [1,10]) @result{}-1 m4_version_compare([2.61a], [2.61a-248-dc51]) @result{}-1 m4_version_compare([2.61b], [2.61a-248-dc51]) @result{}1 @end example @end defmac @defmac m4_version_prereq (@var{version}, @ovar{if-new-enough}, @ @dvar{if-old, m4_fatal}) @msindex{version_prereq} Compares @var{version} against the version of Autoconf currently running. 
If the running version is at @var{version} or newer, expand @var{if-new-enough}, but if @var{version} is larger than the version currently executing, expand @var{if-old}, which defaults to printing an error message and exiting m4sugar with status 63. When given only one argument, this behaves like @code{AC_PREREQ} (@pxref{Versioning}). Remember that the autoconf philosophy favors feature checks over version checks. @end defmac @node Set manipulation Macros @subsection Set manipulation in M4 @cindex Set manipulation @cindex Data structure, set @cindex Unordered set manipulation Sometimes, it is necessary to track a set of data, where the order does not matter and where there are no duplicates in the set. The following macros facilitate set manipulations. Each set is an opaque object, which can only be accessed via these basic operations. The underlying implementation guarantees linear scaling for set creation, which is more efficient than using the quadratic @code{m4_append_uniq}. Both set names and values can be arbitrary strings, except for unbalanced quotes. This implementation ties up memory for removed elements until the next operation that must traverse all the elements of a set; and although that may slow down some operations until the memory for removed elements is pruned, it still guarantees linear performance. @defmac m4_set_add (@var{set}, @var{value}, @ovar{if-uniq}, @ovar{if-dup}) @msindex{set_add} Adds the string @var{value} as a member of set @var{set}. Expand @var{if-uniq} if the element was added, or @var{if-dup} if it was previously in the set. Operates in amortized constant time, so that set creation scales linearly. @end defmac @defmac m4_set_add_all (@var{set}, @var{value}@dots{}) @msindex{set_add_all} Adds each @var{value} to the set @var{set}. This is slightly more efficient than repeatedly invoking @code{m4_set_add}. 
@end defmac @defmac m4_set_contains (@var{set}, @var{value}, @ovar{if-present}, @ @ovar{if-absent}) @msindex{set_contains} Expands @var{if-present} if the string @var{value} is a member of @var{set}, otherwise @var{if-absent}. @example m4_set_contains([a], [1], [yes], [no]) @result{}no m4_set_add([a], [1], [added], [dup]) @result{}added m4_set_add([a], [1], [added], [dup]) @result{}dup m4_set_contains([a], [1], [yes], [no]) @result{}yes m4_set_remove([a], [1], [removed], [missing]) @result{}removed m4_set_contains([a], [1], [yes], [no]) @result{}no m4_set_remove([a], [1], [removed], [missing]) @result{}missing @end example @end defmac @defmac m4_set_contents (@var{set}, @ovar{sep}) @defmacx m4_set_dump (@var{set}, @ovar{sep}) @msindex{set_contents} @msindex{set_dump} Expands to a single string consisting of all the members of the set @var{set}, each separated by @var{sep}, which is not expanded. @code{m4_set_contents} leaves the elements in @var{set} but reclaims any memory occupied by removed elements, while @code{m4_set_dump} is a faster one-shot action that also deletes the set. No provision is made for disambiguating members that contain a non-empty @var{sep} as a substring; use @code{m4_set_empty} to distinguish between an empty set and the set containing only the empty string. The order of the output is unspecified; in the current implementation, part of the speed of @code{m4_set_dump} results from using a different output order than @code{m4_set_contents}. These macros scale linearly in the size of the set before memory pruning, and @code{m4_set_contents([@var{set}], [@var{sep}])} is faster than @code{m4_joinall([@var{sep}]m4_set_listc([@var{set}]))}. 
@example m4_set_add_all([a], [1], [2], [3]) @result{} m4_set_contents([a], [-]) @result{}1-2-3 m4_joinall([-]m4_set_listc([a])) @result{}1-2-3 m4_set_dump([a], [-]) @result{}3-2-1 m4_set_contents([a]) @result{} m4_set_add([a], []) @result{} m4_set_contents([a], [-]) @result{} @end example @end defmac @defmac m4_set_delete (@var{set}) @msindex{set_delete} Delete all elements and memory associated with @var{set}. This is linear in the set size, and faster than removing one element at a time. @end defmac @defmac m4_set_difference (@var{seta}, @var{setb}) @defmacx m4_set_intersection (@var{seta}, @var{setb}) @defmacx m4_set_union (@var{seta}, @var{setb}) @msindex{set_difference} @msindex{set_intersection} @msindex{set_union} Compute the relation between @var{seta} and @var{setb}, and output the result as a list of quoted arguments without duplicates and with a leading comma. Set difference selects the elements in @var{seta} but not @var{setb}, intersection selects only elements in both sets, and union selects elements in either set. These actions are linear in the sum of the set sizes. The leading comma is necessary to distinguish between no elements and the empty string as the only element. @example m4_set_add_all([a], [1], [2], [3]) @result{} m4_set_add_all([b], [3], [], [4]) @result{} m4_set_difference([a], [b]) @result{},1,2 m4_set_difference([b], [a]) @result{},,4 m4_set_intersection([a], [b]) @result{},3 m4_set_union([a], [b]) @result{},1,2,3,,4 @end example @end defmac @defmac m4_set_empty (@var{set}, @ovar{if-empty}, @ovar{if-elements}) @msindex{set_empty} Expand @var{if-empty} if the set @var{set} has no elements, otherwise expand @var{if-elements}. This macro operates in constant time. Using this macro can help disambiguate output from @code{m4_set_contents} or @code{m4_set_list}. 
@end defmac @defmac m4_set_foreach (@var{set}, @var{variable}, @var{action}) @msindex{set_foreach} For each element in the set @var{set}, expand @var{action} with the macro @var{variable} defined as the set element. Behavior is unspecified if @var{action} recursively lists the contents of @var{set} (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element currently contained in @var{variable}. This macro is faster than the corresponding @code{m4_foreach([@var{variable}], m4_indir([m4_dquote]m4_set_listc([@var{set}])), [@var{action}])}, although @code{m4_set_map} might be faster still. @example m4_set_add_all([a]m4_for([i], [1], [5], [], [,i])) @result{} m4_set_contents([a]) @result{}12345 m4_set_foreach([a], [i], [m4_if(m4_eval(i&1), [0], [m4_set_remove([a], i, [i])])]) @result{}24 m4_set_contents([a]) @result{}135 @end example @end defmac @defmac m4_set_list (@var{set}) @defmacx m4_set_listc (@var{set}) @msindex{set_list} @msindex{set_listc} Produce a list of arguments, where each argument is a quoted element from the set @var{set}. The variant @code{m4_set_listc} is unambiguous, by adding a leading comma if there are any set elements, whereas the variant @code{m4_set_list} cannot distinguish between an empty set and a set containing only the empty string. These can be directly used in macros that take multiple arguments, such as @code{m4_join} or @code{m4_set_add_all}, or wrapped by @code{m4_dquote} for macros that take a quoted list, such as @code{m4_map} or @code{m4_foreach}. Any memory occupied by removed elements is reclaimed during these macros. 
@example m4_set_add_all([a], [1], [2], [3]) @result{} m4_set_list([a]) @result{}1,2,3 m4_set_list([b]) @result{} m4_set_listc([b]) @result{} m4_count(m4_set_list([b])) @result{}1 m4_set_empty([b], [0], [m4_count(m4_set_list([b]))]) @result{}0 m4_set_add([b], []) @result{} m4_set_list([b]) @result{} m4_set_listc([b]) @result{}, m4_count(m4_set_list([b])) @result{}1 m4_set_empty([b], [0], [m4_count(m4_set_list([b]))]) @result{}1 @end example @end defmac @defmac m4_set_map (@var{set}, @var{action}) @msindex{set_map} For each element in the set @var{set}, expand @var{action} with a single argument of the set element. Behavior is unspecified if @var{action} recursively lists the contents of @var{set} (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element passed as an argument. This macro is faster than either corresponding counterpart of @code{m4_map_args([@var{action}]m4_set_listc([@var{set}]))} or @code{m4_set_foreach([@var{set}], [var], [@var{action}(m4_defn([var]))])}. It is possible to use @code{m4_curry} if more than one argument is needed for @var{action}, although it is more efficient to use @code{m4_set_map_sep} in that case. @end defmac @defmac m4_set_map_sep (@var{set}, @ovar{pre}, @ovar{post}, @ovar{sep}) @msindex{set_map_sep} For each element in the set @var{set}, expand @code{@var{pre}[element]@var{post}}, additionally expanding @var{sep} between elements. Behavior is unspecified if the expansion recursively lists the contents of @var{set} (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element visited by the expansion. This macro provides the most efficient means for non-destructively visiting the elements of a set; in particular, @code{m4_set_map([@var{set}], [@var{action}])} is equivalent to @code{m4_set_map_sep([@var{set}], [@var{action}(], [)])}. 
@end defmac @defmac m4_set_remove (@var{set}, @var{value}, @ovar{if-present}, @ @ovar{if-absent}) @msindex{set_remove} If @var{value} is an element in the set @var{set}, then remove it and expand @var{if-present}. Otherwise expand @var{if-absent}. This macro operates in constant time so that multiple removals will scale linearly rather than quadratically; but when used outside of @code{m4_set_foreach} or @code{m4_set_map}, it leaves memory occupied until the set is later compacted by @code{m4_set_contents} or @code{m4_set_list}. Several other set operations are then less efficient between the time of element removal and subsequent memory compaction, but still maintain their guaranteed scaling performance. @end defmac @defmac m4_set_size (@var{set}) @msindex{set_size} Expand to the size of the set @var{set}. This implementation operates in constant time, and is thus more efficient than @code{m4_eval(m4_count(m4_set_listc([set])) - 1)}. @end defmac @node Forbidden Patterns @subsection Forbidden Patterns @cindex Forbidden patterns @cindex Patterns, forbidden M4sugar provides a means to define suspicious patterns, patterns describing tokens which should not be found in the output. For instance, if an Autoconf @file{configure} script includes tokens such as @samp{AC_DEFINE}, or @samp{dnl}, then most probably something went wrong (typically a macro was not evaluated because of overquotation). M4sugar forbids all the tokens matching @samp{^_?m4_} and @samp{^dnl$}. Additional layers, such as M4sh and Autoconf, add additional forbidden patterns to the list. @defmac m4_pattern_forbid (@var{pattern}) @msindex{pattern_forbid} Declare that no token matching @var{pattern} must be found in the output. The output file is (temporarily) split into one word per line as part of the @command{autom4te} post-processing, with each line (and therefore word) then being checked against the Perl regular expression @var{pattern}. 
If the regular expression matches, and @code{m4_pattern_allow} does not also match, then an error is raised. Comments are not checked; this can be a problem if, for instance, you have some macro left unexpanded after an @samp{#include}. No consensus is currently found in the Autoconf community, as some people consider it should be valid to name macros in comments (which doesn't make sense to the authors of this documentation: input, such as macros, should be documented by @samp{dnl} comments; reserving @samp{#}-comments to document the output). As an example, if you define your own macros that begin with @samp{M_} and are composed from capital letters and underscores, the specification of @code{m4_pattern_forbid([^M_[A-Z_]+])} will ensure all your macros are expanded when not used in comments. As an example of a common use of this macro, consider what happens in packages that want to use the @command{pkg-config} script via the third-party @code{PKG_CHECK_MODULES} macro. By default, if a developer checks out the development tree but has not yet installed the pkg-config macros locally, they can manage to successfully run @command{autoconf} on the package, but the resulting @file{configure} file will likely result in a confusing shell message about a syntax error on the line mentioning the unexpanded @code{PKG_CHECK_MODULES} macro. On the other hand, if @file{configure.ac} includes @code{m4_pattern_forbid([^PKG_])}, the missing pkg-config macros will be detected immediately without allowing @command{autoconf} to succeed. @end defmac Of course, you might encounter exceptions to these generic rules, for instance you might have to refer to @samp{$m4_flags}. @defmac m4_pattern_allow (@var{pattern}) @msindex{pattern_allow} Any token matching @var{pattern} is allowed, including if it matches an @code{m4_pattern_forbid} pattern. 
For example, Gnulib uses @code{m4_pattern_forbid([^gl_])} to reserve the @code{gl_} namespace for itself, but also uses @code{m4_pattern_allow([^gl_ES$])} to avoid a false negative on the valid locale name. @end defmac @node Debugging via autom4te @section Debugging via autom4te @cindex debugging tips @cindex autom4te debugging tips @cindex m4sugar debugging tips At times, it is desirable to see what was happening inside m4, to see why output was not matching expectations. However, post-processing done by @command{autom4te} means that directly using the m4 builtin @code{m4_traceon} is likely to interfere with operation. Also, frequent diversion changes and the concept of forbidden tokens make it difficult to use @code{m4_defn} to generate inline comments in the final output. There are a couple of tools to help with this. One is the use of the @option{--trace} option provided by @command{autom4te} (as well as each of the programs that wrap @command{autom4te}, such as @command{autoconf}), in order to inspect when a macro is called and with which arguments. For example, when this paragraph was written, the autoconf version could be found by: @example $ @kbd{autoconf --trace=AC_INIT} configure.ac:23:AC_INIT:GNU Autoconf:2.63b.95-3963:bug-autoconf@@gnu.org $ @kbd{autoconf --trace='AC_INIT:version is $2'} version is 2.63b.95-3963 @end example Another trick is to print out the expansion of various m4 expressions to standard error or to an independent file, with no further m4 expansion, and without interfering with diversion changes or the post-processing done to standard output. @code{m4_errprintn} shows a given expression on standard error. 
For example, if you want to see the expansion of an autoconf primitive or of one of your autoconf macros, you can do it like this: @example $ @kbd{cat <<\EOF > configure.ac} AC_INIT m4_errprintn([The definition of AC_DEFINE_UNQUOTED:]) m4_errprintn(m4_defn([AC_DEFINE_UNQUOTED])) AC_OUTPUT EOF $ @kbd{autoconf} @error{}The definition of AC_DEFINE_UNQUOTED: @error{}_AC_DEFINE_Q([], $@@) @end example @node Programming in M4sh @chapter Programming in M4sh M4sh, pronounced ``mash'', is aiming at producing portable Bourne shell scripts. This name was coined by Lars J. Aas, who notes that, according to the Webster's Revised Unabridged Dictionary (1913): @quotation Mash \Mash\, n. [Akin to G. meisch, maisch, meische, maische, mash, wash, and prob.@: to AS. miscian to mix. See ``Mix''.] @enumerate 1 @item A mass of mixed ingredients reduced to a soft pulpy state by beating or pressure@enddots{} @item A mixture of meal or bran and water fed to animals. @item A mess; trouble. [Obs.] --Beau.@: & Fl. @end enumerate @end quotation M4sh reserves the M4 macro namespace @samp{^_AS_} for internal use, and the namespace @samp{^AS_} for M4sh macros. It also reserves the shell and environment variable namespace @samp{^as_}, and the here-document delimiter namespace @samp{^_AS[A-Z]} in the output file. You should not define your own macros or output shell code that conflicts with these namespaces. @menu * Common Shell Constructs:: Portability layer for common shell constructs * Polymorphic Variables:: Support for indirect variable names * Initialization Macros:: Macros to establish a sane shell environment * File Descriptor Macros:: File descriptor macros for input and output @end menu @node Common Shell Constructs @section Common Shell Constructs M4sh provides portable alternatives for some common shell constructs that unfortunately are not portable in practice. 
@c Deprecated, to be replaced by a better API @ignore @defmac AS_BASENAME (@var{file-name}) @asindex{BASENAME} Output the non-directory portion of @var{file-name}. For example, if @code{$file} is @samp{/one/two/three}, the command @code{base=`AS_BASENAME(["$file"])`} sets @code{base} to @samp{three}. @end defmac @end ignore @defmac AS_BOX (@var{text}, @dvar{char, -}) @asindex{BOX} Expand into shell code that will output @var{text} surrounded by a box with @var{char} in the top and bottom border. @var{text} should not contain a newline, but may contain shell expansions valid for unquoted here-documents. @var{char} defaults to @samp{-}, but can be any character except @samp{/}, @samp{'}, @samp{"}, @samp{\}, @samp{&}, or @samp{`}. This is useful for outputting a comment box into log files to separate distinct phases of script operation. @end defmac @defmac AS_CASE (@var{word}, @ovar{pattern1}, @ovar{if-matched1}, @ @dots{}, @ovar{default}) @asindex{CASE} Expand into a shell @samp{case} statement, where @var{word} is matched against one or more patterns. @var{if-matched} is run if the corresponding pattern matched @var{word}, else @var{default} is run. @xref{Prerequisite Macros} for why this macro should be used instead of plain @samp{case} in code outside of an @code{AC_DEFUN} macro, when the contents of the @samp{case} use @code{AC_REQUIRE} directly or indirectly. @xref{case, , Limitations of Shell Builtins}, for how this macro avoids some portability issues. @xref{Balancing Parentheses} for how this macro lets you write code with balanced parentheses even if your code must run on obsolescent shells. @end defmac @c Deprecated, to be replaced by a better API @defmac AS_DIRNAME (@var{file-name}) @asindex{DIRNAME} Output the directory portion of @var{file-name}. For example, if @code{$file} is @samp{/one/two/three}, the command @code{dir=`AS_DIRNAME(["$file"])`} sets @code{dir} to @samp{/one/two}. 
This interface may be improved in the future to avoid forks and losing trailing newlines. @end defmac @defmac AS_ECHO (@var{word}) @asindex{ECHO} Emits @var{word} to the standard output, followed by a newline. @var{word} must be a single shell word (typically a quoted string). The bytes of @var{word} are output as-is, even if it starts with "-" or contains "\". Redirections can be placed outside the macro invocation. This is much more portable than using @command{echo} (@pxref{echo, , Limitations of Shell Builtins}). @end defmac @defmac AS_ECHO_N (@var{word}) @asindex{ECHO_N} Emits @var{word} to the standard output, without a following newline. @var{word} must be a single shell word (typically a quoted string) and, for portability, should not include more than one newline. The bytes of @var{word} are output as-is, even if it starts with "-" or contains "\". Redirections can be placed outside the macro invocation. @end defmac @c We cannot use @dvar because the macro expansion mistreats backslashes. @defmac AS_ESCAPE (@var{string}, @r{[}@var{chars} = @samp{`\"$}@r{]}) @asindex{ESCAPE} Expands to @var{string}, with any characters in @var{chars} escaped with a backslash (@samp{\}). @var{chars} should be at most four bytes long, and only contain characters from the set @samp{`\"$}; however, characters may be safely listed more than once in @var{chars} for the sake of syntax highlighting editors. The current implementation expands @var{string} after adding escapes; if @var{string} contains macro calls that in turn expand to text needing shell quoting, you can use @code{AS_ESCAPE(m4_dquote(m4_expand([string])))}. The default for @var{chars} (@samp{\"$`}) is the set of characters needing escapes when @var{string} will be used literally within double quotes. One common variant is the set of characters to protect when @var{string} will be used literally within back-ticks or an unquoted here-document (@samp{\$`}). 
Another common variant is @samp{""}, which can be used to form a double-quoted string containing the same expansions that would have occurred if @var{string} were expanded in an unquoted here-document; however, when using this variant, care must be taken that @var{string} does not use double quotes within complex variable expansions (such as @samp{$@{foo-`echo "hi"`@}}) that would be broken with improper escapes. This macro is often used with @code{AS_ECHO}. For an example, observe the output generated by the shell code generated from this snippet: @example foo=bar AS_ECHO(["AS_ESCAPE(["$foo" = ])AS_ESCAPE(["$foo"], [""])"]) @result{}"$foo" = "bar" m4_define([macro], [a, [\b]]) AS_ECHO(["AS_ESCAPE([[macro]])"]) @result{}macro AS_ECHO(["AS_ESCAPE([macro])"]) @result{}a, b AS_ECHO(["AS_ESCAPE(m4_dquote(m4_expand([macro])))"]) @result{}a, \b @end example @comment Should we add AS_ESCAPE_SINGLE? If we do, we can optimize in @comment the case of @var{string} that does not contain '. To escape a string that will be placed within single quotes, use: @example m4_bpatsubst([[@var{string}]], ['], ['\\'']) @end example @end defmac @defmac AS_EXECUTABLE_P (@var{file}) @asindex{EXECUTABLE_P} Emit code to probe whether @var{file} is a regular file with executable permissions (and not a directory with search permissions). The caller is responsible for quoting @var{file}. @end defmac @defmac AS_EXIT (@dvar{status, $?}) @asindex{EXIT} Emit code to exit the shell with @var{status}, defaulting to @samp{$?}. This macro works around shells that see the exit status of the command prior to @code{exit} inside a @samp{trap 0} handler (@pxref{trap, , Limitations of Shell Builtins}). @end defmac @defmac AS_IF (@var{test1}, @ovar{run-if-true1}, @dots{}, @ovar{run-if-false}) @asindex{IF} Run shell code @var{test1}. If @var{test1} exits with a zero status then run shell code @var{run-if-true1}, else examine further tests. 
If no test exits with a zero status, run shell code @var{run-if-false}, with simplifications if either @var{run-if-true1} or @var{run-if-false} is empty. For example, @example AS_IF([test "x$foo" = xyes], [HANDLE_FOO([yes])], [test "x$foo" != xno], [HANDLE_FOO([maybe])], [echo foo not specified]) @end example @noindent ensures any required macros of @code{HANDLE_FOO} are expanded before the first test. This macro should be used instead of plain @samp{if} in code outside of an @code{AC_DEFUN} macro, when the contents of the @samp{if} use @code{AC_REQUIRE} directly or indirectly (@pxref{Prerequisite Macros}). @end defmac @defmac AS_MKDIR_P (@var{file-name}) @asindex{MKDIR_P} Make the directory @var{file-name}, including intervening directories as necessary. This is equivalent to @samp{mkdir -p -- @var{file-name}}, except that it is portable to older versions of @command{mkdir} that lack support for the @option{-p} option or for the @option{--} delimiter (@pxref{mkdir, , Limitations of Usual Tools}). Also, @code{AS_MKDIR_P} succeeds if @var{file-name} is a symbolic link to an existing directory, even though Posix is unclear whether @samp{mkdir -p} should succeed in that case. If creation of @var{file-name} fails, exit the script. Also see the @code{AC_PROG_MKDIR_P} macro (@pxref{Particular Programs}). @end defmac @defmac AS_SET_STATUS (@var{status}) @asindex{SET_STATUS} Emit shell code to set the value of @samp{$?} to @var{status}, as efficiently as possible. However, this is not guaranteed to abort a shell running with @code{set -e} (@pxref{set, , Limitations of Shell Builtins}). This should also be used at the end of a complex shell function instead of @samp{return} (@pxref{Shell Functions}) to avoid a DJGPP shell bug. @end defmac @defmac AS_TR_CPP (@var{expression}) @asindex{TR_CPP} Transform @var{expression} into a valid right-hand side for a C @code{#define}. For example: @example # This outputs "#define HAVE_CHAR_P 1". 
# Notice the m4 quoting around #, to prevent an m4 comment type="char *" echo "[#]define AS_TR_CPP([HAVE_$type]) 1" @end example @end defmac @defmac AS_TR_SH (@var{expression}) @asindex{TR_SH} Transform @var{expression} into shell code that generates a valid shell variable name. The result is literal when possible at m4 time, but must be used with @code{eval} if @var{expression} causes shell indirections. For example: @example # This outputs "Have it!". header="sys/some file.h" eval AS_TR_SH([HAVE_$header])=yes if test "x$HAVE_sys_some_file_h" = xyes; then echo "Have it!"; fi @end example @end defmac @defmac AS_SET_CATFILE (@var{var}, @var{dir}, @var{file}) @asindex{SET_CATFILE} Set the polymorphic shell variable @var{var} to @var{dir}/@var{file}, but optimizing the common cases (@var{dir} or @var{file} is @samp{.}, @var{file} is absolute, etc.). @end defmac @defmac AS_UNSET (@var{var}) @asindex{UNSET} Unsets the shell variable @var{var}, working around bugs in older shells (@pxref{unset, , Limitations of Shell Builtins}). @var{var} can be a literal or indirect variable name. @end defmac @defmac AS_VERSION_COMPARE (@var{version-1}, @var{version-2}, @ @ovar{action-if-less}, @ovar{action-if-equal}, @ovar{action-if-greater}) @asindex{VERSION_COMPARE} Compare two strings @var{version-1} and @var{version-2}, possibly containing shell variables, as version strings, and expand @var{action-if-less}, @var{action-if-equal}, or @var{action-if-greater} depending upon the result. The algorithm to compare is similar to the one used by strverscmp in glibc (@pxref{String/Array Comparison, , String/Array Comparison, libc, The GNU C Library}). @end defmac @node Polymorphic Variables @section Support for indirect variable names @cindex variable name indirection @cindex polymorphic variable name @cindex indirection, variable name Often, it is convenient to write a macro that will emit shell code operating on a shell variable. The simplest case is when the variable name is known. 
But a more powerful idiom is writing shell code that can work through an indirection, where another variable or command substitution produces the name of the variable to actually manipulate. M4sh supports the notion of polymorphic shell variables, making it easy to write a macro that can deal with either literal or indirect variable names and output shell code appropriate for both use cases. Behavior is undefined if expansion of an indirect variable does not result in a literal variable name. @defmac AS_LITERAL_IF (@var{expression}, @ovar{if-literal}, @ovar{if-not}, @ @dvarv{if-simple-ref, if-not}) @defmacx AS_LITERAL_WORD_IF (@var{expression}, @ovar{if-literal}, @ @ovar{if-not}, @dvarv{if-simple-ref, if-not}) @asindex{LITERAL_IF} @asindex{LITERAL_WORD_IF} If the expansion of @var{expression} is definitely a shell literal, expand @var{if-literal}. If the expansion of @var{expression} looks like it might contain shell indirections (such as @code{$var} or @code{`expr`}), then @var{if-not} is expanded. Sometimes, it is possible to output optimized code if @var{expression} consists only of shell variable expansions (such as @code{$@{var@}}), in which case @var{if-simple-ref} can be provided; but defaulting to @var{if-not} should always be safe. @code{AS_LITERAL_WORD_IF} only expands @var{if-literal} if @var{expression} looks like a single shell word, containing no whitespace; while @code{AS_LITERAL_IF} allows whitespace in @var{expression}. 
In order to reduce the time spent recognizing whether an @var{expression} qualifies as a literal or a simple indirection, the implementation is somewhat conservative: @var{expression} must be a single shell word (possibly after stripping whitespace), consisting only of bytes that would have the same meaning whether unquoted or enclosed in double quotes (for example, @samp{a.b} results in @var{if-literal}, even though it is not a valid shell variable name; while both @samp{'a'} and @samp{[$]} result in @var{if-not}, because they behave differently than @samp{"'a'"} and @samp{"[$]"}). This macro can be used in contexts for recognizing portable file names (such as in the implementation of @code{AC_LIBSOURCE}), or coupled with some transliterations for forming valid variable names (such as in the implementation of @code{AS_TR_SH}, which uses an additional @code{m4_translit} to convert @samp{.} to @samp{_}). This example shows how to read the contents of the shell variable @code{bar}, exercising all three arguments to @code{AS_LITERAL_IF}. It results in a script that will output the line @samp{hello} three times. @example AC_DEFUN([MY_ACTION], [AS_LITERAL_IF([$1], [echo "$$1"], @c $$ [AS_VAR_COPY([var], [$1]) echo "$var"], [eval 'echo "$'"$1"\"])]) foo=bar bar=hello MY_ACTION([bar]) MY_ACTION([`echo bar`]) MY_ACTION([$foo]) @end example @end defmac @defmac AS_VAR_APPEND (@var{var}, @var{text}) @asindex{VAR_APPEND} Emit shell code to append the shell expansion of @var{text} to the end of the current contents of the polymorphic shell variable @var{var}, taking advantage of shells that provide the @samp{+=} extension for more efficient scaling. For situations where the final contents of @var{var} are relatively short (less than 256 bytes), it is more efficient to use the simpler code sequence of @code{@var{var}=$@{@var{var}@}@var{text}} (or its polymorphic equivalent of @code{AS_VAR_COPY([t], [@var{var}])} and @code{AS_VAR_SET([@var{var}], ["$t"@var{text}])}). 
But in the case when the script will be repeatedly appending text into @code{var}, issues of scaling start to become apparent. A naive implementation requires execution time linear to the length of the current contents of @var{var} as well as the length of @var{text} for a single append, for an overall quadratic scaling with multiple appends. This macro takes advantage of shells which provide the extension @code{@var{var}+=@var{text}}, which can provide amortized constant time for a single append, for an overall linear scaling with multiple appends. Note that unlike @code{AS_VAR_SET}, this macro requires that @var{text} be quoted properly to avoid field splitting and file name expansion. @end defmac @defmac AS_VAR_ARITH (@var{var}, @var{expression}) @asindex{VAR_ARITH} Emit shell code to compute the arithmetic expansion of @var{expression}, assigning the result as the contents of the polymorphic shell variable @var{var}. The code takes advantage of shells that provide @samp{$(())} for fewer forks, but uses @command{expr} as a fallback. Therefore, the syntax for a valid @var{expression} is rather limited: all operators must occur as separate shell arguments and with proper quoting, there is no portable equality operator, all variables containing numeric values must be expanded prior to the computation, all numeric values must be provided in decimal without leading zeroes, and the first shell argument should not be a negative number. In the following example, this snippet will print @samp{(2+3)*4 == 20}. @example bar=3 AS_VAR_ARITH([foo], [\( 2 + $bar \) \* 4]) echo "(2+$bar)*4 == $foo" @end example @end defmac @defmac AS_VAR_COPY (@var{dest}, @var{source}) @asindex{VAR_COPY} Emit shell code to assign the contents of the polymorphic shell variable @var{source} to the polymorphic shell variable @var{dest}. 
For example, executing this M4sh snippet will output @samp{bar hi}: @example foo=bar bar=hi AS_VAR_COPY([a], [foo]) AS_VAR_COPY([b], [$foo]) echo "$a $b" @end example When it is necessary to access the contents of an indirect variable inside a shell double-quoted context, the recommended idiom is to first copy the contents into a temporary literal shell variable. @smallexample for header in stdint_h inttypes_h ; do AS_VAR_COPY([var], [ac_cv_header_$header]) echo "$header detected: $var" done @end smallexample @end defmac @comment AS_VAR_GET is intentionally undocumented; it can't handle @comment trailing newlines uniformly, and forks too much. @defmac AS_VAR_IF (@var{var}, @ovar{word}, @ovar{if-equal}, @ @ovar{if-not-equal}) @asindex{VAR_IF} Output a shell conditional statement. If the contents of the polymorphic shell variable @var{var} match the string @var{word}, execute @var{if-equal}; otherwise execute @var{if-not-equal}. @var{word} must be a single shell word (typically a quoted string). Avoids shell bugs if an interrupt signal arrives while a command substitution in @var{var} is being expanded. @end defmac @defmac AS_VAR_PUSHDEF (@var{m4-name}, @var{value}) @defmacx AS_VAR_POPDEF (@var{m4-name}) @asindex{VAR_PUSHDEF} @asindex{VAR_POPDEF} @cindex composing variable names @cindex variable names, composing A common M4sh idiom involves composing shell variable names from an m4 argument (for example, writing a macro that uses a cache variable). @var{value} can be an arbitrary string, which will be transliterated into a valid shell name by @code{AS_TR_SH}. In order to access the composed variable name based on @var{value}, it is easier to declare a temporary m4 macro @var{m4-name} with @code{AS_VAR_PUSHDEF}, then use that macro as the argument to subsequent @code{AS_VAR} macros as a polymorphic variable name, and finally free the temporary macro with @code{AS_VAR_POPDEF}. These macros are often followed with @code{dnl}, to avoid excess newlines in the output. 
Here is an involved example, that shows the power of writing macros that can handle composed shell variable names: @example m4_define([MY_CHECK_HEADER], [AS_VAR_PUSHDEF([my_Header], [ac_cv_header_$1])dnl AS_VAR_IF([my_Header], [yes], [echo "header $1 detected"])dnl AS_VAR_POPDEF([my_Header])dnl ]) MY_CHECK_HEADER([stdint.h]) for header in inttypes.h stdlib.h ; do MY_CHECK_HEADER([$header]) done @end example @noindent In the above example, @code{MY_CHECK_HEADER} can operate on polymorphic variable names. In the first invocation, the m4 argument is @code{stdint.h}, which transliterates into a literal @code{stdint_h}. As a result, the temporary macro @code{my_Header} expands to the literal shell name @samp{ac_cv_header_stdint_h}. In the second invocation, the m4 argument to @code{MY_CHECK_HEADER} is @code{$header}, and the temporary macro @code{my_Header} expands to the indirect shell name @samp{$as_my_Header}. During the shell execution of the for loop, when @samp{$header} contains @samp{inttypes.h}, then @samp{$as_my_Header} contains @samp{ac_cv_header_inttypes_h}. If this script is then run on a platform where all three headers have been previously detected, the output of the script will include: @smallexample header stdint.h detected header inttypes.h detected header stdlib.h detected @end smallexample @end defmac @defmac AS_VAR_SET (@var{var}, @ovar{value}) @asindex{VAR_SET} Emit shell code to assign the contents of the polymorphic shell variable @var{var} to the shell expansion of @var{value}. @var{value} is not subject to field splitting or file name expansion, so if command substitution is used, it may be done with @samp{`""`} rather than using an intermediate variable (@pxref{Shell Substitutions}). However, @var{value} does undergo rescanning for additional macro names; behavior is unspecified if late expansion results in any shell meta-characters. 
@end defmac @defmac AS_VAR_SET_IF (@var{var}, @ovar{if-set}, @ovar{if-undef}) @asindex{VAR_SET_IF} Emit a shell conditional statement, which executes @var{if-set} if the polymorphic shell variable @code{var} is set to any value, and @var{if-undef} otherwise. @end defmac @defmac AS_VAR_TEST_SET (@var{var}) @asindex{VAR_TEST_SET} Emit a shell statement that results in a successful exit status only if the polymorphic shell variable @code{var} is set. @end defmac @node Initialization Macros @section Initialization Macros @defmac AS_BOURNE_COMPATIBLE @asindex{BOURNE_COMPATIBLE} Set up the shell to be more compatible with the Bourne shell as standardized by Posix, if possible. This may involve setting environment variables, or setting options, or similar implementation-specific actions. This macro is deprecated, since @code{AS_INIT} already invokes it. @end defmac @defmac AS_INIT @asindex{INIT} @evindex LC_ALL @evindex SHELL Initialize the M4sh environment. This macro calls @code{m4_init}, then outputs the @code{#! /bin/sh} line, a notice about where the output was generated from, and code to sanitize the environment for the rest of the script. Among other initializations, this sets @env{SHELL} to the shell chosen to run the script (@pxref{CONFIG_SHELL}), and @env{LC_ALL} to ensure the C locale. Finally, it changes the current diversion to @code{BODY}. @code{AS_INIT} is called automatically by @code{AC_INIT} and @code{AT_INIT}, so shell code in @file{configure}, @file{config.status}, and @file{testsuite} all benefit from a sanitized shell environment. @end defmac @defmac AS_INIT_GENERATED (@var{file}, @ovar{comment}) @asindex{INIT_GENERATED} Emit shell code to start the creation of a subsidiary shell script in @var{file}, including changing @var{file} to be executable. 
This macro populates the child script with information learned from the parent (thus, the emitted code is equivalent in effect, but more efficient, than the code output by @code{AS_INIT}, @code{AS_BOURNE_COMPATIBLE}, and @code{AS_SHELL_SANITIZE}). If present, @var{comment} is output near the beginning of the child, prior to the shell initialization code, and is subject to parameter expansion, command substitution, and backslash quote removal. The parent script should check the exit status after this macro, in case @var{file} could not be properly created (for example, if the disk was full). If successfully created, the parent script can then proceed to append additional M4sh constructs into the child script. Note that the child script starts life without a log file open, so if the parent script uses logging (@pxref{AS_MESSAGE_LOG_FD}), you must temporarily disable any attempts to use the log file until after emitting code to open a log within the child. On the other hand, if the parent script has @code{AS_MESSAGE_FD} redirected somewhere besides @samp{1}, then the child script already has code that copies stdout to that descriptor. Currently, the suggested idiom for writing a M4sh shell script from within another script is: @example AS_INIT_GENERATED([@var{file}], [[# My child script. ]]) || @{ AS_ECHO(["Failed to create child script"]); AS_EXIT; @} m4_pushdef([AS_MESSAGE_LOG_FD])dnl cat >> "@var{file}" <<\__EOF__ # Code to initialize AS_MESSAGE_LOG_FD m4_popdef([AS_MESSAGE_LOG_FD])dnl # Additional code __EOF__ @end example This, however, may change in the future as the M4sh interface is stabilized further. Also, be aware that use of @env{LINENO} within the child script may report line numbers relative to their location in the parent script, even when using @code{AS_LINENO_PREPARE}, if the parent script was unable to locate a shell with working @env{LINENO} support. 
@end defmac @defmac AS_LINENO_PREPARE @asindex{LINENO_PREPARE} @evindex LINENO Find a shell that supports the special variable @env{LINENO}, which contains the number of the currently executing line. This macro is automatically invoked by @code{AC_INIT} in configure scripts. @end defmac @defmac AS_ME_PREPARE @asindex{ME_PREPARE} Set up variable @env{as_me} to be the basename of the currently executing script. This macro is automatically invoked by @code{AC_INIT} in configure scripts. @end defmac @defmac AS_TMPDIR (@var{prefix}, @dvar{dir, $@{TMPDIR:=/tmp@}}) @asindex{TMPDIR} @evindex TMPDIR @ovindex tmp Create, as safely as possible, a temporary sub-directory within @var{dir} with a name starting with @var{prefix}. @var{prefix} should be 2--4 characters, to make it slightly easier to identify the owner of the directory. If @var{dir} is omitted, then the value of @env{TMPDIR} will be used (defaulting to @samp{/tmp}). On success, the name of the newly created directory is stored in the shell variable @code{tmp}. On error, the script is aborted. Typically, this macro is coupled with some exit traps to delete the created directory and its contents on exit or interrupt. However, there is a slight window between when the directory is created and when the name is actually known to the shell, so an interrupt at the right moment might leave the temporary directory behind. Hence it is important to use a @var{prefix} that makes it easier to determine if a leftover temporary directory from an interrupted script is safe to delete. The use of the output variable @samp{$tmp} rather than something in the @samp{as_} namespace is historical; it has the unfortunate consequence that reusing this otherwise common name for any other purpose inside your script has the potential to break any cleanup traps designed to remove the temporary directory. @end defmac @defmac AS_SHELL_SANITIZE @asindex{SHELL_SANITIZE} Initialize the shell suitably for @command{configure} scripts. 
This has the effect of @code{AS_BOURNE_COMPATIBLE}, and sets some other environment variables for predictable results from configuration tests. For example, it sets @env{LC_ALL} to change to the default C locale. @xref{Special Shell Variables}. This macro is deprecated, since @code{AS_INIT} already invokes it. @end defmac @node File Descriptor Macros @section File Descriptor Macros @cindex input @cindex standard input @cindex file descriptors @cindex descriptors @cindex low-level output @cindex output, low-level The following macros define file descriptors used to output messages (or input values) from @file{configure} scripts. For example: @example echo "$wombats found" >&AS_MESSAGE_LOG_FD echo 'Enter desired kangaroo count:' >&AS_MESSAGE_FD read kangaroos <&AS_ORIGINAL_STDIN_FD @end example @noindent However doing so is seldom needed, because Autoconf provides higher level macros as described below. @defmac AS_MESSAGE_FD @asindex{MESSAGE_FD} The file descriptor for @samp{checking for...} messages and results. By default, @code{AS_INIT} sets this to @samp{1} for standalone M4sh clients. However, @code{AC_INIT} shuffles things around to another file descriptor, in order to allow the @option{-q} option of @command{configure} to choose whether messages should go to the script's standard output or be discarded. If you want to display some messages, consider using one of the printing macros (@pxref{Printing Messages}) instead. Copies of messages output via these macros are also recorded in @file{config.log}. @end defmac @anchor{AS_MESSAGE_LOG_FD} @defmac AS_MESSAGE_LOG_FD @asindex{MESSAGE_LOG_FD} This must either be empty, or expand to a file descriptor for log messages. By default, @code{AS_INIT} sets this macro to the empty string for standalone M4sh clients, thus disabling logging. However, @code{AC_INIT} shuffles things around so that both @command{configure} and @command{config.status} use @file{config.log} for log messages. 
Macros that run tools, like @code{AC_COMPILE_IFELSE} (@pxref{Running the Compiler}), redirect all output to this descriptor. You may want to do so if you develop such a low-level macro. @end defmac @defmac AS_ORIGINAL_STDIN_FD @asindex{ORIGINAL_STDIN_FD} This must expand to a file descriptor for the original standard input. By default, @code{AS_INIT} sets this macro to @samp{0} for standalone M4sh clients. However, @code{AC_INIT} shuffles things around for safety. When @command{configure} runs, it may accidentally execute an interactive command that has the same name as the non-interactive one meant to be used or checked. If the standard input was the terminal, such interactive programs would cause @command{configure} to stop, pending some user input. Therefore @command{configure} redirects its standard input from @file{/dev/null} during its initialization. This is not normally a problem, since @command{configure} normally does not need user input. In the extreme case where your @file{configure} script really needs to obtain some values from the original standard input, you can read them explicitly from @code{AS_ORIGINAL_STDIN_FD}. @end defmac @c =================================================== Writing Autoconf Macros. @node Writing Autoconf Macros @chapter Writing Autoconf Macros When you write a feature test that could be applicable to more than one software package, the best thing to do is encapsulate it in a new macro. Here are some instructions and guidelines for writing Autoconf macros. You should also familiarize yourself with M4sugar (@pxref{Programming in M4}) and M4sh (@pxref{Programming in M4sh}). 
@menu * Macro Definitions:: Basic format of an Autoconf macro * Macro Names:: What to call your new macros * Dependencies Between Macros:: What to do when macros depend on other macros * Obsoleting Macros:: Warning about old ways of doing things * Coding Style:: Writing Autoconf macros @`a la Autoconf @end menu @node Macro Definitions @section Macro Definitions @defmac AC_DEFUN (@var{name}, @ovar{body}) @acindex{DEFUN} Autoconf macros are defined using the @code{AC_DEFUN} macro, which is similar to the M4 builtin @code{m4_define} macro; this creates a macro named @var{name} and with @var{body} as its expansion. In addition to defining a macro, @code{AC_DEFUN} adds to it some code that is used to constrain the order in which macros are called, while avoiding redundant output (@pxref{Prerequisite Macros}). @end defmac An Autoconf macro definition looks like this: @example AC_DEFUN(@var{macro-name}, @var{macro-body}) @end example You can refer to any arguments passed to the macro as @samp{$1}, @samp{$2}, etc. @xref{Definitions, , How to define new macros, m4, GNU M4}, for more complete information on writing M4 macros. Most macros fall in one of two general categories. The first category includes macros which take arguments, in order to generate output parameterized by those arguments. Macros in this category are designed to be directly expanded, often multiple times, and should not be used as the argument to @code{AC_REQUIRE}. The other category includes macros which are shorthand for a fixed block of text, and therefore do not take arguments. For this category of macros, directly expanding the macro multiple times results in redundant output, so it is more common to use the macro as the argument to @code{AC_REQUIRE}, or to declare the macro with @code{AC_DEFUN_ONCE} (@pxref{One-Shot Macros}). Be sure to properly quote both the @var{macro-body} @emph{and} the @var{macro-name} to avoid any problems if the macro happens to have been previously defined. 
Each macro should have a header comment that gives its prototype, and a brief description. When arguments have default values, display them in the prototype. For example: @example # AC_MSG_ERROR(ERROR, [EXIT-STATUS = 1]) # -------------------------------------- m4_define([AC_MSG_ERROR], [@{ AS_MESSAGE([error: $1], [2]) exit m4_default([$2], [1]); @}]) @end example Comments about the macro should be left in the header comment. Most other comments make their way into @file{configure}, so just keep using @samp{#} to introduce comments. @cindex @code{dnl} If you have some special comments about pure M4 code, comments that make no sense in @file{configure} and in the header comment, then use the builtin @code{dnl}: it causes M4 to discard the text through the next newline. Keep in mind that @code{dnl} is rarely needed to introduce comments; @code{dnl} is more useful to get rid of the newlines following macros that produce no output, such as @code{AC_REQUIRE}. Public third-party macros need to use @code{AC_DEFUN}, and not @code{m4_define}, in order to be found by @command{aclocal} (@pxref{Extending aclocal,,, automake, GNU Automake}). Additionally, if it is ever determined that a macro should be made obsolete, it is easy to convert from @code{AC_DEFUN} to @code{AU_DEFUN} in order to have @command{autoupdate} assist the user in choosing a better alternative, but there is no corresponding way to make @code{m4_define} issue an upgrade notice (@pxref{AU_DEFUN}). There is another subtle, but important, difference between using @code{m4_define} and @code{AC_DEFUN}: only the former is unaffected by @code{AC_REQUIRE}. When writing a file, it is always safe to replace a block of text with a @code{m4_define} macro that will expand to the same text. 
But replacing a block of text with an @code{AC_DEFUN} macro with the same content does not necessarily give the same results, because it changes the location where any embedded but unsatisfied @code{AC_REQUIRE} invocations within the block will be expanded. For an example of this, see @ref{Expanded Before Required}. @node Macro Names @section Macro Names All of the public Autoconf macros have all-uppercase names in the namespace @samp{^AC_} to prevent them from accidentally conflicting with other text; Autoconf also reserves the namespace @samp{^_AC_} for internal macros. All shell variables that they use for internal purposes have mostly-lowercase names starting with @samp{ac_}. Autoconf also uses here-document delimiters in the namespace @samp{^_AC[A-Z]}. During @command{configure}, files produced by Autoconf make heavy use of the file system namespace @samp{^conf}. Since Autoconf is built on top of M4sugar (@pxref{Programming in M4sugar}) and M4sh (@pxref{Programming in M4sh}), you must also be aware of those namespaces (@samp{^_?\(m4\|AS\)_}). And since @file{configure.ac} is also designed to be scanned by Autoheader, Autoscan, Autoupdate, and Automake, you should be aware of the @samp{^_?A[HNUM]_} namespaces. In general, you @emph{should not use} the namespace of a package that does not own the macro or shell code you are writing. To ensure that your macros don't conflict with present or future Autoconf macros, you should prefix your own macro names and any shell variables they use with some other sequence. Possibilities include your initials, or an abbreviation for the name of your organization or software package. 
Historically, people have not always followed the rule of using a namespace appropriate for their package, and this has made it difficult to determine the origin of a macro (and where to report bugs about that macro), as well as difficult for the true namespace owner to add new macros without interference from pre-existing uses of third-party macros. Perhaps the best example of this confusion is the @code{AM_GNU_GETTEXT} macro, which belongs, not to Automake, but to Gettext. Most of the Autoconf macros' names follow a structured naming convention that indicates the kind of feature check by the name. The macro names consist of several words, separated by underscores, going from most general to most specific. The names of their cache variables use the same convention (@pxref{Cache Variable Names}, for more information on them). The first word of the name after the namespace initials (such as @samp{AC_}) usually tells the category of the feature being tested. Here are the categories used in Autoconf for specific test macros, the kind of macro that you are more likely to write. They are also used for cache variables, in all-lowercase. Use them where applicable; where they're not, invent your own categories. @table @code @item C C language builtin features. @item DECL Declarations of C variables in header files. @item FUNC Functions in libraries. @item GROUP Posix group owners of files. @item HEADER Header files. @item LIB C libraries. @item PROG The base names of programs. @item MEMBER Members of aggregates. @item SYS Operating system features. @item TYPE C builtin or declared types. @item VAR C variables in libraries. @end table After the category comes the name of the particular feature being tested. Any further words in the macro name indicate particular aspects of the feature. For example, @code{AC_PROG_MAKE_SET} checks whether @command{make} sets a variable to its own name. 
An internal macro should have a name that starts with an underscore; Autoconf internals should therefore start with @samp{_AC_}. Additionally, a macro that is an internal subroutine of another macro should have a name that starts with an underscore and the name of that other macro, followed by one or more words saying what the internal macro does. For example, @code{AC_PATH_X} has internal macros @code{_AC_PATH_X_XMKMF} and @code{_AC_PATH_X_DIRECT}. @node Dependencies Between Macros @section Dependencies Between Macros @cindex Dependencies between macros Some Autoconf macros depend on other macros having been called first in order to work correctly. Autoconf provides a way to ensure that certain macros are called if needed and a way to warn the user if macros are called in an order that might cause incorrect operation. @menu * Prerequisite Macros:: Ensuring required information * Suggested Ordering:: Warning about possible ordering problems * One-Shot Macros:: Ensuring a macro is called only once @end menu @node Prerequisite Macros @subsection Prerequisite Macros @cindex Prerequisite macros @cindex Macros, prerequisites A macro that you write might need to use values that have previously been computed by other macros. For example, @code{AC_DECL_YYTEXT} examines the output of @code{flex} or @code{lex}, so it depends on @code{AC_PROG_LEX} having been called first to set the shell variable @code{LEX}. Rather than forcing the user of the macros to keep track of the dependencies between them, you can use the @code{AC_REQUIRE} macro to do it automatically. @code{AC_REQUIRE} can ensure that a macro is only called if it is needed, and only called once. @defmac AC_REQUIRE (@var{macro-name}) @acindex{REQUIRE} If the M4 macro @var{macro-name} has not already been called, call it (without any arguments). Make sure to quote @var{macro-name} with square brackets. 
@var{macro-name} must have been defined using @code{AC_DEFUN} or else contain a call to @code{AC_PROVIDE} to indicate that it has been called. @code{AC_REQUIRE} must be used inside a macro defined by @code{AC_DEFUN}; it must not be called from the top level. Also, it does not make sense to require a macro that takes parameters. @end defmac @code{AC_REQUIRE} is often misunderstood. It really implements dependencies between macros in the sense that if one macro depends upon another, the latter is expanded @emph{before} the body of the former. To be more precise, the required macro is expanded before the outermost defined macro in the current expansion stack. In particular, @samp{AC_REQUIRE([FOO])} is not replaced with the body of @code{FOO}. For instance, this definition of macros: @example @group AC_DEFUN([TRAVOLTA], [test "$body_temperature_in_celsius" -gt 38 && dance_floor=occupied]) AC_DEFUN([NEWTON_JOHN], [test "x$hair_style" = xcurly && dance_floor=occupied]) @end group @group AC_DEFUN([RESERVE_DANCE_FLOOR], [if test "x`date +%A`" = xSaturday; then AC_REQUIRE([TRAVOLTA]) AC_REQUIRE([NEWTON_JOHN]) fi]) @end group @end example @noindent with this @file{configure.ac} @example AC_INIT([Dance Manager], [1.0], [bug-dance@@example.org]) RESERVE_DANCE_FLOOR if test "x$dance_floor" = xoccupied; then AC_MSG_ERROR([cannot pick up here, let's move]) fi @end example @noindent does not leave you with a better chance to meet a kindred soul on days other than Saturday, since the call to @code{RESERVE_DANCE_FLOOR} expands to: @example @group test "$body_temperature_in_celsius" -gt 38 && dance_floor=occupied test "x$hair_style" = xcurly && dance_floor=occupied if test "x`date +%A`" = xSaturday; then fi @end group @end example This behavior was chosen on purpose: (i) it prevents messages in required macros from interrupting the messages in the requiring macros; (ii) it avoids bad surprises when shell conditionals are used, as in: @example @group if @dots{}; then 
AC_REQUIRE([SOME_CHECK]) fi @dots{} SOME_CHECK @end group @end example However, this implementation can lead to another class of problems. Consider the case where an outer macro first expands, then indirectly requires, an inner macro: @example AC_DEFUN([TESTA], [[echo in A if test -n "$SEEN_A" ; then echo duplicate ; fi SEEN_A=:]]) AC_DEFUN([TESTB], [AC_REQUIRE([TESTA])[echo in B if test -z "$SEEN_A" ; then echo bug ; fi]]) AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) AC_DEFUN([OUTER], [[echo in OUTER] TESTA TESTC]) OUTER @end example @noindent Prior to Autoconf 2.64, the implementation of @code{AC_REQUIRE} recognized that @code{TESTB} needed to be hoisted prior to the expansion of @code{OUTER}, but because @code{TESTA} had already been directly expanded, it failed to hoist @code{TESTA}. Therefore, the expansion of @code{TESTB} occurs prior to its prerequisites, leading to the following output: @example in B bug in OUTER in A in C @end example @noindent Newer Autoconf is smart enough to recognize this situation, and hoists @code{TESTA} even though it has already been expanded, but issues a syntax warning in the process. This is because the hoisted expansion of @code{TESTA} defeats the purpose of using @code{AC_REQUIRE} to avoid redundant code, and causes its own set of problems if the hoisted macro is not idempotent: @example in A in B in OUTER in A duplicate in C @end example The bug is not in Autoconf, but in the macro definitions. If you ever pass a particular macro name to @code{AC_REQUIRE}, then you are implying that the macro only needs to be expanded once. But to enforce this, either the macro must be declared with @code{AC_DEFUN_ONCE} (although this only helps in Autoconf 2.64 or newer), or all uses of that macro should be through @code{AC_REQUIRE}; directly expanding the macro defeats the point of using @code{AC_REQUIRE} to eliminate redundant expansion. 
In the example, this rule of thumb was violated because @code{TESTB} requires @code{TESTA} while @code{OUTER} directly expands it. One way of fixing the bug is to factor @code{TESTA} into two macros, the portion designed for direct and repeated use (here, named @code{TESTA}), and the portion designed for one-shot output and used only inside @code{AC_REQUIRE} (here, named @code{TESTA_PREREQ}). Then, by fixing all clients to use the correct calling convention according to their needs: @example AC_DEFUN([TESTA], [AC_REQUIRE([TESTA_PREREQ])[echo in A]]) AC_DEFUN([TESTA_PREREQ], [[echo in A_PREREQ if test -n "$SEEN_A" ; then echo duplicate ; fi SEEN_A=:]]) AC_DEFUN([TESTB], [AC_REQUIRE([TESTA_PREREQ])[echo in B if test -z "$SEEN_A" ; then echo bug ; fi]]) AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) AC_DEFUN([OUTER], [[echo in OUTER] TESTA TESTC]) OUTER @end example @noindent the resulting output will then obey all dependency rules and avoid any syntax warnings, whether the script is built with old or new Autoconf versions: @example in A_PREREQ in B in OUTER in A in C @end example You can use the helper macros @code{AS_IF} and @code{AS_CASE} in top-level code to enforce expansion of required macros outside of shell conditional constructs; these helpers are not needed in the bodies of macros defined by @code{AC_DEFUN}. You are furthermore encouraged, although not required, to put all @code{AC_REQUIRE} calls at the beginning of a macro. You can use @code{dnl} to avoid the empty lines they leave. Autoconf will normally warn if an @code{AC_REQUIRE} call refers to a macro that has not been defined. However, the @command{aclocal} tool relies on parsing an incomplete set of input files to trace which macros have been required, in order to then pull in additional files that provide those macros; for this particular use case, pre-defining the macro @code{m4_require_silent_probe} will avoid the warnings. 
@node Suggested Ordering @subsection Suggested Ordering @cindex Macros, ordering @cindex Ordering macros Some macros should be run before another macro if both are called, but neither @emph{requires} that the other be called. For example, a macro that changes the behavior of the C compiler should be called before any macros that run the C compiler. Many of these dependencies are noted in the documentation. Autoconf provides the @code{AC_BEFORE} macro to warn users when macros with this kind of dependency appear out of order in a @file{configure.ac} file. The warning occurs when creating @command{configure} from @file{configure.ac}, not when running @command{configure}. For example, @code{AC_PROG_CPP} checks whether the C compiler can run the C preprocessor when given the @option{-E} option. It should therefore be called after any macros that change which C compiler is being used, such as @code{AC_PROG_CC}. So @code{AC_PROG_CC} contains: @example AC_BEFORE([$0], [AC_PROG_CPP])dnl @end example @noindent This warns the user if a call to @code{AC_PROG_CPP} has already occurred when @code{AC_PROG_CC} is called. @defmac AC_BEFORE (@var{this-macro-name}, @var{called-macro-name}) @acindex{BEFORE} Make M4 print a warning message to the standard error output if @var{called-macro-name} has already been called. @var{this-macro-name} should be the name of the macro that is calling @code{AC_BEFORE}. The macro @var{called-macro-name} must have been defined using @code{AC_DEFUN} or else contain a call to @code{AC_PROVIDE} to indicate that it has been called. @end defmac @node One-Shot Macros @subsection One-Shot Macros @cindex One-shot macros @cindex Macros, called once Some macros should be called only once, either because calling them multiple times is unsafe, or because it is bad style. For instance Autoconf ensures that @code{AC_CANONICAL_BUILD} and cousins (@pxref{Canonicalizing}) are evaluated only once, because it makes no sense to run these expensive checks more than once. 
Such one-shot macros can be defined using @code{AC_DEFUN_ONCE}. @defmac AC_DEFUN_ONCE (@var{macro-name}, @var{macro-body}) @acindex{DEFUN_ONCE} Declare macro @var{macro-name} like @code{AC_DEFUN} would (@pxref{Macro Definitions}), but add additional logic that guarantees that only the first use of the macro (whether by direct expansion or @code{AC_REQUIRE}) causes an expansion of @var{macro-body}; the expansion will occur before the start of any enclosing macro defined by @code{AC_DEFUN}. Subsequent expansions are silently ignored. Generally, it does not make sense for @var{macro-body} to use parameters such as @code{$1}. @end defmac Prior to Autoconf 2.64, a macro defined by @code{AC_DEFUN_ONCE} would emit a warning if it was directly expanded a second time, so for portability, it is better to use @code{AC_REQUIRE} than direct invocation of @var{macro-name} inside a macro defined by @code{AC_DEFUN} (@pxref{Prerequisite Macros}). @node Obsoleting Macros @section Obsoleting Macros @cindex Obsoleting macros @cindex Macros, obsoleting Configuration and portability technology has evolved over the years. Often better ways of solving a particular problem are developed, or ad-hoc approaches are systematized. This process has occurred in many parts of Autoconf. One result is that some of the macros are now considered @dfn{obsolete}; they still work, but are no longer considered the best thing to do, hence they should be replaced with more modern macros. Ideally, @command{autoupdate} should replace the old macro calls with their modern implementation. Autoconf provides a simple means to obsolete a macro. @anchor{AU_DEFUN} @defmac AU_DEFUN (@var{old-macro}, @var{implementation}, @ovar{message}, @ovar{silent}) @auindex{DEFUN} Define @var{old-macro} as @var{implementation}, just like @code{AC_DEFUN}, but also declare @var{old-macro} to be obsolete. 
When @command{autoupdate} is run, occurrences of @var{old-macro} will be replaced by the text of @var{implementation} in the updated @file{configure.ac} file. If a simple textual replacement is not enough to finish the job of updating a @file{configure.ac} to modern style, provide instructions for whatever additional manual work is required as @var{message}. These instructions will be printed by @command{autoupdate}, and embedded in the updated @file{configure.ac} file, next to the text of @var{implementation}. Normally, @command{autoconf} will also issue a warning (in the ``obsolete'' category) when it expands @var{old-macro}. This warning does not include @var{message}; it only advises the maintainer to run @command{autoupdate}. If it is inappropriate to issue this warning, set the @var{silent} argument to the word @code{silent}. One might want to use a silent @code{AU_DEFUN} when @var{old-macro} is used in a widely-distributed third-party macro. If that macro's maintainers are aware of the need to update their code, it's unnecessary to nag all of the transitive users of @var{old-macro} as well. This capability was added to @code{AU_DEFUN} in Autoconf 2.70; older versions of autoconf will ignore the @var{silent} argument and issue the warning anyway. @strong{Caution:} If @var{implementation} contains M4 or M4sugar macros, they will be evaluated when @command{autoupdate} is run, not emitted verbatim like the rest of @var{implementation}. This cannot be avoided with extra quotation, because then @var{old-macro} will not work when it is called normally. See the definition of @code{AC_FOREACH} in @file{general.m4} for a workaround. @end defmac @defmac AU_ALIAS (@var{old-name}, @var{new-name}, @ovar{silent}) @auindex{ALIAS} A shorthand version of @code{AU_DEFUN}, to be used when a macro has simply been renamed. @command{autoupdate} will replace calls to @var{old-name} with calls to @var{new-name}, keeping any arguments intact. 
No instructions for additional manual work will be printed. The @var{silent} argument works the same as the @var{silent} argument to @code{AU_DEFUN}. It was added to @code{AU_ALIAS} in Autoconf 2.70. @strong{Caution:} @code{AU_ALIAS} cannot be used when @var{new-name} is an M4 or M4sugar macro. See above. @end defmac @node Coding Style @section Coding Style @cindex Coding style The Autoconf macros follow a strict coding style. You are encouraged to follow this style, especially if you intend to distribute your macro, either by contributing it to Autoconf itself or the @uref{https://@/www.gnu.org/@/software/@/autoconf-archive/, Autoconf Macro Archive}, or by other means. The first requirement is to pay great attention to the quotation. For more details, see @ref{Autoconf Language}, and @ref{M4 Quotation}. Do not try to invent new interfaces. It is likely that there is a macro in Autoconf that resembles the macro you are defining: try to stick to this existing interface (order of arguments, default values, etc.). We @emph{are} conscious that some of these interfaces are not perfect; nevertheless, when harmless, homogeneity should be preferred over creativity. Be careful about clashes both between M4 symbols and between shell variables. If you stick to the suggested M4 naming scheme (@pxref{Macro Names}), you are unlikely to generate conflicts. Nevertheless, when you need to set a special value, @emph{avoid using a regular macro name}; rather, use an ``impossible'' name. For instance, up to version 2.13, the macro @code{AC_SUBST} used to remember what @var{symbol} macros were already defined by setting @code{AC_SUBST_@var{symbol}}, which is a regular macro name. But since there is a macro named @code{AC_SUBST_FILE}, it was just impossible to @samp{AC_SUBST(FILE)}! In this case, @code{AC_SUBST(@var{symbol})} or @code{_AC_SUBST(@var{symbol})} should have been used (yes, with the parentheses). 
@c or better yet, high-level macros such as @code{m4_expand_once} No Autoconf macro should ever enter the user-variable name space; i.e., except for the variables that are the actual result of running the macro, all shell variables should start with @code{ac_}. In addition, small macros or any macro that is likely to be embedded in other macros should be careful not to use obvious names. @cindex @code{dnl} Do not use @code{dnl} to introduce comments: most of the comments you are likely to write are either header comments which are not output anyway, or comments that should make their way into @file{configure}. There are exceptional cases where you do want to comment special M4 constructs, in which case @code{dnl} is right, but keep in mind that it is unlikely. M4 ignores the leading blanks and newlines before each argument. Use this feature to indent in such a way that arguments are (more or less) aligned with the opening parenthesis of the macro being called. For instance, instead of @example AC_CACHE_CHECK(for EMX OS/2 environment, ac_cv_emxos2, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM(, [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) @end example @noindent write @example AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) @end example @noindent or even @example AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) @end example When using @code{AC_RUN_IFELSE} or any macro that cannot work when cross-compiling, provide a pessimistic value (typically @samp{no}). Feel free to use various tricks to prevent auxiliary tools, such as syntax-highlighting editors, from behaving improperly. 
For instance, instead of: @example m4_bpatsubst([$1], [$"]) @end example @noindent use @example m4_bpatsubst([$1], [$""]) @end example @noindent so that Emacsen do not open an endless ``string'' at the first quote. For the same reasons, avoid: @example test $[#] != 0 @end example @noindent and use: @example test $[@@%:@@] != 0 @end example @noindent Otherwise, the closing bracket would be hidden inside a @samp{#}-comment, breaking the bracket-matching highlighting from Emacsen. Note the preferred style to escape from M4: @samp{$[1]}, @samp{$[@@]}, etc. Do not escape when it is unnecessary. Common examples of useless quotation are @samp{[$]$1} (write @samp{$$1}), @samp{[$]var} (use @samp{$var}), etc. If you add portability issues to the picture, you'll prefer @samp{$@{1+"$[@@]"@}} to @samp{"[$]@@"}, and you'll prefer do something better than hacking Autoconf @code{:-)}. When using @command{sed}, don't use @option{-e} except for indenting purposes. With the @code{s} and @code{y} commands, the preferred separator is @samp{/} unless @samp{/} itself might appear in the pattern or replacement, in which case you should use @samp{|}, or optionally @samp{,} if you know the pattern and replacement cannot contain a file name. If none of these characters will do, choose a printable character that cannot appear in the pattern or replacement. Characters from the set @samp{"#$&'()*;<=>?`|~} are good choices if the pattern or replacement might contain a file name, since they have special meaning to the shell and are less likely to occur in file names. @xref{Macro Definitions}, for details on how to define a macro. If a macro doesn't use @code{AC_REQUIRE}, is expected to never be the object of an @code{AC_REQUIRE} directive, and macros required by other macros inside arguments do not need to be expanded before this macro, then use @code{m4_define}. In case of doubt, use @code{AC_DEFUN}. 
Also take into account that public third-party macros need to use @code{AC_DEFUN} in order to be found by @command{aclocal} (@pxref{Extending aclocal,,, automake, GNU Automake}). All the @code{AC_REQUIRE} statements should be at the beginning of the macro, and each statement should be followed by @code{dnl}. You should not rely on the number of arguments: instead of checking whether an argument is missing, test that it is not empty. It provides both a simpler and a more predictable interface to the user, and saves room for further arguments. Unless the macro is short, try to leave the closing @samp{])} at the beginning of a line, followed by a comment that repeats the name of the macro being defined. This introduces an additional newline in @command{configure}; normally, that is not a problem, but if you want to remove it you can use @samp{[]dnl} on the last line. You can similarly use @samp{[]dnl} after a macro call to remove its newline. @samp{[]dnl} is recommended instead of @samp{dnl} to ensure that M4 does not interpret the @samp{dnl} as being attached to the preceding text or macro output. For example, instead of: @example AC_DEFUN([AC_PATH_X], [AC_MSG_CHECKING([for X]) AC_REQUIRE_CPP() @r{# @dots{}omitted@dots{}} AC_MSG_RESULT([libraries $x_libraries, headers $x_includes]) fi]) @end example @noindent you would write: @example AC_DEFUN([AC_PATH_X], [AC_REQUIRE_CPP()[]dnl AC_MSG_CHECKING([for X]) @r{# @dots{}omitted@dots{}} AC_MSG_RESULT([libraries $x_libraries, headers $x_includes]) fi[]dnl ])# AC_PATH_X @end example If the macro is long, try to split it into logical chunks. Typically, macros that check for a bug in a function and prepare its @code{AC_LIBOBJ} replacement should have an auxiliary macro to perform this setup. Do not hesitate to introduce auxiliary macros to factor your code. In order to highlight the recommended coding style, here is a macro written the old way: @example dnl Check for EMX on OS/2. 
dnl _AC_EMXOS2 AC_DEFUN(_AC_EMXOS2, [AC_CACHE_CHECK(for EMX OS/2 environment, ac_cv_emxos2, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM(, return __EMX__;)], ac_cv_emxos2=yes, ac_cv_emxos2=no)]) test "x$ac_cv_emxos2" = xyes && EMXOS2=yes]) @end example @noindent and the new way: @example # _AC_EMXOS2 # ---------- # Check for EMX on OS/2. m4_define([_AC_EMXOS2], [AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) test "x$ac_cv_emxos2" = xyes && EMXOS2=yes[]dnl ])# _AC_EMXOS2 @end example @c ============================================= Portable Shell Programming @node Portable Shell @chapter Portable Shell Programming @cindex Portable shell programming When writing your own checks, there are some shell-script programming techniques you should avoid in order to make your code portable. The Bourne shell and upward-compatible shells like the Korn shell and Bash have evolved over the years, and many features added to the original System7 shell are now supported on all interesting porting targets. However, the following discussion between Russ Allbery and Robert Lipe is worth reading: @noindent Russ Allbery: @quotation The GNU assumption that @command{/bin/sh} is the one and only shell leads to a permanent deadlock. Vendors don't want to break users' existing shell scripts, and there are some corner cases in the Bourne shell that are not completely compatible with a Posix shell. Thus, vendors who have taken this route will @emph{never} (OK@dots{}``never say never'') replace the Bourne shell (as @command{/bin/sh}) with a Posix shell. @end quotation @noindent Robert Lipe: @quotation This is exactly the problem. While most (at least most System V's) do have a Bourne shell that accepts shell functions most vendor @command{/bin/sh} programs are not the Posix shell. 
So while most modern systems do have a shell @emph{somewhere} that meets the Posix standard, the challenge is to find it. @end quotation For this reason, part of the job of M4sh (@pxref{Programming in M4sh}) is to find such a shell. But to prevent trouble, if you're not using M4sh you should not take advantage of features that were added after Unix version 7, circa 1977 (@pxref{Systemology}); you should not use aliases, negated character classes, or even @command{unset}. @code{#} comments, while not in Unix version 7, were retrofitted in the original Bourne shell and can be assumed to be part of the least common denominator. On the other hand, if you're using M4sh you can assume that the shell has the features that were added in SVR2 (circa 1984), including shell functions, @command{return}, @command{unset}, and I/O redirection for builtins. For more information, refer to @uref{https://@/www.in-ulm.de/@/~mascheck/@/bourne/}. However, some pitfalls have to be avoided for portable use of these constructs; these will be documented in the rest of this chapter. See in particular @ref{Shell Functions} and @ref{Limitations of Builtins, , Limitations of Shell Builtins}. Some ancient systems have quite small limits on the length of the @samp{#!} line; for instance, 32 bytes (not including the newline) on SunOS 4. However, these ancient systems are no longer of practical concern. The set of external programs you should run in a @command{configure} script is fairly small. @xref{Utilities in Makefiles, , Utilities in Makefiles, standards, The GNU Coding Standards}, for the list. This restriction allows users to start out with a fairly small set of programs and build the rest, avoiding too many interdependencies between packages. Some of these external utilities have a portable subset of features; see @ref{Limitations of Usual Tools}. There are other sources of documentation about shells. 
The specification for the Posix @uref{https://@/pubs.opengroup.org/@/onlinepubs/@/9699919799/@/utilities/@/V3_chap02.html, Shell Command Language}, though more generous than the restrictive shell subset described above, is fairly portable nowadays. Also please see @uref{http://@/www.faqs.org/@/faqs/@/unix-faq/@/shell/, the Shell FAQs}. @menu * Shellology:: A zoology of shells * Invoking the Shell:: Invoking the shell as a command * Here-Documents:: Quirks and tricks * File Descriptors:: FDs and redirections * Signal Handling:: Shells, signals, and headaches * File System Conventions:: File names * Shell Pattern Matching:: Pattern matching * Shell Substitutions:: Variable and command expansions * Assignments:: Varying side effects of assignments * Parentheses:: Parentheses in shell scripts * Slashes:: Slashes in shell scripts * Special Shell Variables:: Variables you should not change * Shell Functions:: What to look out for if you use them * Limitations of Builtins:: Portable use of not so portable /bin/sh * Limitations of Usual Tools:: Portable use of portable tools @end menu @node Shellology @section Shellology @cindex Shellology There are several families of shells, most prominently the Bourne family and the C shell family which are deeply incompatible. If you want to write portable shell scripts, avoid members of the C shell family. The @uref{http://@/www.faqs.org/@/faqs/@/unix-faq/@/shell/@/shell-differences/, the Shell difference FAQ} includes a small history of Posix shells, and a comparison between several of them. Below we describe some of the members of the Bourne shell family. @table @asis @item Ash @cindex Ash Ash is often used on GNU/Linux and BSD systems as a light-weight Bourne-compatible shell. Ash 0.2 has some bugs that are fixed in the 0.3.x series, but portable shell scripts should work around them, since version 0.2 is still shipped with many GNU/Linux distributions. 
To be compatible with Ash 0.2: @itemize @minus @item don't use @samp{$?} after expanding empty or unset variables, or at the start of an @command{eval}: @example foo= false $foo echo "Do not use it: $?" false eval 'echo "Do not use it: $?"' @end example @item don't use command substitution within variable expansion: @example cat $@{FOO=`bar`@} @end example @item beware that single builtin substitutions are not performed by a subshell, hence their effect applies to the current shell! @xref{Shell Substitutions}, item ``Command Substitution''. @end itemize @item Bash @cindex Bash To detect whether you are running Bash, test whether @code{BASH_VERSION} is set. To require Posix compatibility, run @samp{set -o posix}. @xref{Bash POSIX Mode, , Bash Posix Mode, bash, The GNU Bash Reference Manual}, for details. @item Bash 2.05 and later @cindex Bash 2.05 and later Versions 2.05 and later of Bash use a different format for the output of the @command{set} builtin, designed to make evaluating its output easier. However, this output is not compatible with earlier versions of Bash (or with many other shells, probably). So if you use Bash 2.05 or higher to execute @command{configure}, you'll need to use Bash 2.05 for all other build tasks as well. @item Ksh @cindex Ksh @cindex Korn shell @prindex @samp{ksh} @prindex @samp{ksh88} @prindex @samp{ksh93} The Korn shell is compatible with the Bourne family and it mostly conforms to Posix. It has two major variants commonly called @samp{ksh88} and @samp{ksh93}, named after the years of initial release. It is usually called @command{ksh}, but is called @command{sh} on some hosts if you set your path appropriately. On Solaris 11, @command{/bin/sh} and @command{/usr/bin/ksh} are both @samp{ksh93}. 
On Solaris 10 and earlier, @command{/bin/sh} is a pre-Posix Bourne shell and the Korn shell is found elsewhere: @prindex @command{/usr/bin/ksh} on Solaris @command{/usr/bin/ksh} is @samp{ksh88} on Solaris 2.0 through 10, @prindex @command{/usr/xpg4/bin/sh} on Solaris @command{/usr/xpg4/bin/sh} is a Posix-compliant variant of @samp{ksh88} on Solaris 9 and later, @prindex @command{/usr/dt/bin/dtksh} on Solaris and @command{/usr/dt/bin/dtksh} is @samp{ksh93}. Variants that are not standard may be parts of optional packages. There is no extra charge for these packages, but they are not part of a minimal OS install and therefore some installations may not have it. Starting with Tru64 Version 4.0, the Korn shell @command{/usr/bin/ksh} is also available as @command{/usr/bin/posix/sh}. If the environment variable @env{BIN_SH} is set to @code{xpg4}, subsidiary invocations of the standard shell conform to Posix. @item Pdksh @prindex @samp{pdksh} A public-domain clone of the Korn shell called @command{pdksh} is widely available: it has most of the @samp{ksh88} features along with a few of its own. It usually sets @code{KSH_VERSION}, except if invoked as @command{/bin/sh} on OpenBSD, and similarly to Bash you can require Posix compatibility by running @samp{set -o posix}. Unfortunately, with @command{pdksh} 5.2.14 (the latest stable version as of January 2007) Posix mode is buggy and causes @command{pdksh} to depart from Posix in at least one respect, see @ref{Shell Substitutions}. @item Zsh @cindex Zsh To detect whether you are running @command{zsh}, test whether @code{ZSH_VERSION} is set. By default @command{zsh} is @emph{not} compatible with the Bourne shell: you must execute @samp{emulate sh}, and for @command{zsh} versions before 3.1.6-dev-18 you must also set @code{NULLCMD} to @samp{:}. @xref{Compatibility, , Compatibility, zsh, The Z Shell Manual}, for details. The default Mac OS X @command{sh} was originally Zsh; it was changed to Bash in Mac OS X 10.2. 
@end table @node Invoking the Shell @section Invoking the Shell @cindex invoking the shell @cindex shell invocation The Korn shell (up to at least version M-12/28/93d) has a bug when invoked on a file whose name does not contain a slash. It first searches for the file's name in @env{PATH}, and if found it executes that rather than the original file. For example, assuming there is a binary executable @file{/usr/bin/script} in your @env{PATH}, the last command in the following example fails because the Korn shell finds @file{/usr/bin/script} and refuses to execute it as a shell script: @example $ @kbd{touch xxyzzyz script} $ @kbd{ksh xxyzzyz} $ @kbd{ksh ./script} $ @kbd{ksh script} ksh: script: cannot execute @end example Bash 2.03 has a bug when invoked with the @option{-c} option: if the option-argument ends in backslash-newline, Bash incorrectly reports a syntax error. The problem does not occur if a character follows the backslash: @example $ @kbd{bash -c 'echo foo \} > @kbd{'} bash: -c: line 2: syntax error: unexpected end of file $ @kbd{bash -c 'echo foo \} > @kbd{ '} foo @end example @noindent @xref{Backslash-Newline-Empty}, for how this can cause problems in makefiles. @node Here-Documents @section Here-Documents @cindex Here-documents @cindex Shell here-documents Don't rely on @samp{\} being preserved just because it has no special meaning together with the next symbol. In the native @command{sh} on OpenBSD 2.7 @samp{\"} expands to @samp{"} in here-documents with unquoted delimiter. As a general rule, if @samp{\\} expands to @samp{\} use @samp{\\} to get @samp{\}. With OpenBSD 2.7's @command{sh} @example @group $ @kbd{cat <<EOF > \" \\ > EOF} " \ @end group @end example @noindent and with Bash: @example @group bash-2.04$ @kbd{cat <<EOF > \" \\ > EOF} \" \ @end group @end example Using command substitutions in a here-document that is fed to a shell function is not portable. 
For example, with Solaris 10 @command{/bin/sh}: @example $ @kbd{kitty () @{ cat; @}} $ @kbd{kitty <<EOF > `echo ok` > EOF} /tmp/sh199886: cannot open $ @kbd{echo $?} 1 @end example Some shells mishandle large here-documents: for example, Solaris 10 @command{dtksh} and the UnixWare 7.1.1 Posix shell, which are derived from Korn shell version M-12/28/93d, mishandle braced variable expansion that crosses a 1024- or 4096-byte buffer boundary within a here-document. Only the part of the variable name after the boundary is used. For example, @code{$@{variable@}} could be replaced by the expansion of @code{$@{ble@}}. If the end of the variable name is aligned with the block boundary, the shell reports an error, as if you used @code{$@{@}}. Instead of @code{$@{variable-default@}}, the shell may expand @code{$@{riable-default@}}, or even @code{$@{fault@}}. This bug can often be worked around by omitting the braces: @code{$variable}. The bug was fixed in @samp{ksh93g} (1998-04-30) but as of 2006 many operating systems were still shipping older versions with the bug. Empty here-documents are not portable either; with the following code, @command{zsh} up to at least version 4.3.10 creates a file with a single newline, whereas other shells create an empty file: @example cat >file <<EOF EOF @end example Many shells (including the Bourne shell) implement here-documents inefficiently. In particular, some shells can be extremely inefficient when a single statement contains many here-documents. For instance if your @file{configure.ac} includes something like: @example @group if <cross_compiling>; then assume this and that else check this check that check something else @dots{} on and on forever @dots{} fi @end group @end example A shell parses the whole @code{if}/@code{fi} construct, creating temporary files for each here-document in it. Some shells create links for such here-documents on every @code{fork}, so that the clean-up code they had installed correctly removes them. 
It is creating the links that can take the shell forever. Moving the tests out of the @code{if}/@code{fi}, or creating multiple @code{if}/@code{fi} constructs, would improve the performance significantly. Anyway, this kind of construct is not exactly the typical use of Autoconf. In fact, it's even not recommended, because M4 macros can't look into shell conditionals, so we may fail to expand a macro when it was expanded before in a conditional path, and the condition turned out to be false at runtime, and we end up not executing the macro at all. Be careful with the use of @samp{<<-} to unindent here-documents. The behavior is only portable for stripping leading @key{TAB}s, and things can silently break if an overzealous editor converts to using leading spaces (not all shells are nice enough to warn about unterminated here-documents). @example $ @kbd{printf 'cat <<-x\n\t1\n\t 2\n\tx\n' | bash && echo done} 1 2 done $ @kbd{printf 'cat <<-x\n 1\n 2\n x\n' | bash-3.2 && echo done} 1 2 x done @end example @node File Descriptors @section File Descriptors @cindex Descriptors @cindex File descriptors @cindex Shell file descriptors Most shells, if not all (including Bash, Zsh, Ash), output traces on stderr, even for subshells. This might result in undesirable content if you meant to capture the standard-error output of the inner command: @example $ @kbd{ash -x -c '(eval "echo foo >&2") 2>stderr'} $ @kbd{cat stderr} + eval echo foo >&2 + echo foo foo $ @kbd{bash -x -c '(eval "echo foo >&2") 2>stderr'} $ @kbd{cat stderr} + eval 'echo foo >&2' ++ echo foo foo $ @kbd{zsh -x -c '(eval "echo foo >&2") 2>stderr'} @i{# Traces on startup files deleted here.} $ @kbd{cat stderr} +zsh:1> eval echo foo >&2 +zsh:1> echo foo foo @end example @noindent One workaround is to grep out uninteresting lines, hoping not to remove good ones. If you intend to redirect both standard error and standard output, redirect standard output first. 
This works better with HP-UX, since its shell mishandles tracing if standard error is redirected first: @example $ @kbd{sh -x -c ': 2>err >out'} + : + 2> err $ @kbd{cat err} 1> out @end example Don't try to redirect the standard error of a command substitution. It must be done @emph{inside} the command substitution. When running @samp{: `cd /zorglub` 2>/dev/null} expect the error message to escape, while @samp{: `cd /zorglub 2>/dev/null`} works properly. On the other hand, some shells, such as Solaris or FreeBSD @command{/bin/sh}, warn about missing programs before performing redirections. Therefore, to silently check whether a program exists, it is necessary to perform redirections on a subshell or brace group: @example $ @kbd{/bin/sh -c 'nosuch 2>/dev/null'} nosuch: not found $ @kbd{/bin/sh -c '(nosuch) 2>/dev/null'} $ @kbd{/bin/sh -c '@{ nosuch; @} 2>/dev/null'} $ @kbd{bash -c 'nosuch 2>/dev/null'} @end example FreeBSD 6.2 sh may mix the trace output lines from the statements in a shell pipeline. It is worth noting that Zsh (but not Ash nor Bash) makes it possible in assignments though: @samp{foo=`cd /zorglub` 2>/dev/null}. Some shells, like @command{ash}, don't recognize bi-directional redirection (@samp{<>}). 
And even on shells that recognize it, it is not portable to use on fifos: Posix does not require read-write support for named pipes, and Cygwin does not support it: @example $ @kbd{mkfifo fifo} $ @kbd{exec 5<>fifo} $ @kbd{echo hi >&5} bash: echo: write error: Communication error on send @end example @noindent Furthermore, versions of @command{dash} before 0.5.6 mistakenly truncate regular files when using @samp{<>}: @example $ @kbd{echo a > file} $ @kbd{bash -c ': 1<>file'; cat file} a $ @kbd{dash -c ': 1<>file'; cat file} $ @kbd{rm file} @end example Solaris 10 @code{/bin/sh} executes redirected compound commands in a subshell, while other shells don't: @example $ @kbd{/bin/sh -c 'foo=0; @{ foo=1; @} 2>/dev/null; echo $foo'} 0 $ @kbd{ksh -c 'foo=0; @{ foo=1; @} 2>/dev/null; echo $foo'} 1 $ @kbd{bash -c 'foo=0; @{ foo=1; @} 2>/dev/null; echo $foo'} 1 @end example When catering to old systems, don't redirect the same file descriptor several times, as you are doomed to failure under Ultrix. @example ULTRIX V4.4 (Rev. 69) System #31: Thu Aug 10 19:42:23 GMT 1995 UWS V4.4 (Rev. 11) $ @kbd{eval 'echo matter >fullness' >void} illegal io $ @kbd{eval '(echo matter >fullness)' >void} illegal io $ @kbd{(eval '(echo matter >fullness)') >void} Ambiguous output redirect. @end example @noindent In each case the expected result is of course @file{fullness} containing @samp{matter} and @file{void} being empty. However, this bug is probably not of practical concern to modern platforms. Solaris 10 @command{sh} will try to optimize away a @command{:} command (even if it is redirected) in a loop after the first iteration, or in a shell function after the first call: @example $ @kbd{for i in 1 2 3 ; do : >x$i; done} $ @kbd{ls x*} x1 $ @kbd{f () @{ : >$1; @}; f y1; f y2; f y3;} $ @kbd{ls y*} y1 @end example @noindent As a workaround, @command{echo} or @command{eval} can be used. Don't rely on file descriptors 0, 1, and 2 remaining closed in a subsidiary program. 
If any of these descriptors is closed, the operating system may open an unspecified file for the descriptor in the new process image. Posix 2008 says this may be done only if the subsidiary program is set-user-ID or set-group-ID, but HP-UX 11.23 does it even for ordinary programs, and the next version of Posix will allow HP-UX behavior. If you want a file descriptor above 2 to be inherited into a child process, then you must use redirections specific to that command or a containing subshell or command group, rather than relying on @command{exec} in the shell. In @command{ksh} as well as HP-UX @command{sh}, file descriptors above 2 which are opened using @samp{exec @var{n}>file} are closed by a subsequent @samp{exec} (such as that involved in the fork-and-exec which runs a program or script): @example $ @kbd{echo 'echo hello >&5' >k} $ @kbd{/bin/sh -c 'exec 5>t; ksh ./k; exec 5>&-; cat t'} hello $ @kbd{bash -c 'exec 5>t; ksh ./k; exec 5>&-; cat t'} hello $ @kbd{ksh -c 'exec 5>t; ksh ./k; exec 5>&-; cat t'} ./k[1]: 5: cannot open [Bad file number] $ @kbd{ksh -c '(ksh ./k) 5>t; cat t'} hello $ @kbd{ksh -c '@{ ksh ./k; @} 5>t; cat t'} hello $ @kbd{ksh -c '5>t ksh ./k; cat t'} hello @end example Don't rely on duplicating a closed file descriptor to cause an error. With Solaris 10 @command{/bin/sh}, failed duplication is silently ignored, which can cause unintended leaks to the original file descriptor. In this example, observe the leak to standard output: @example $ @kbd{bash -c 'echo hi >&3' 3>&-; echo $?} bash: 3: Bad file descriptor 1 $ @kbd{/bin/sh -c 'echo hi >&3' 3>&-; echo $?} hi 0 @end example Fortunately, an attempt to close an already closed file descriptor will portably succeed. Likewise, it is safe to use either style of @samp{@var{n}<&-} or @samp{@var{n}>&-} for closing a file descriptor, even if it doesn't match the read/write mode that the file descriptor was opened with. 
DOS variants cannot rename or remove open files, such as in @samp{mv foo bar >foo} or @samp{rm foo >foo}, even though this is perfectly portable among Posix hosts. A few ancient systems reserved some file descriptors. By convention, file descriptor 3 was opened to @file{/dev/tty} when you logged into Eighth Edition (1985) through Tenth Edition Unix (1989). File descriptor 4 had a special use on the Stardent/Kubota Titan (circa 1990), though we don't now remember what it was. Both these systems are obsolete, so it's now safe to treat file descriptors 3 and 4 like any other file descriptors. On the other hand, you can't portably use multi-digit file descriptors. @command{dash} and Solaris @command{ksh} don't understand any file descriptor larger than @samp{9}: @example $ @kbd{bash -c 'exec 10>&-'; echo $?} 0 $ @kbd{ksh -c 'exec 9>&-'; echo $?} 0 $ @kbd{ksh -c 'exec 10>&-'; echo $?} ksh[1]: exec: 10: not found 127 $ @kbd{dash -c 'exec 9>&-'; echo $?} 0 $ @kbd{dash -c 'exec 10>&-'; echo $?} exec: 1: 10: not found 2 @end example @c <https://lists.gnu.org/archive/html/bug-autoconf/2011-09/msg00004.html> @node Signal Handling @section Signal Handling @cindex Signal handling in the shell @cindex Signals, shells and Portable handling of signals within the shell is another major source of headaches. This is worsened by the fact that various different, mutually incompatible approaches are possible in this area, each with its distinctive merits and demerits. A detailed description of these possible approaches, as well as of their pros and cons, can be found in @uref{https://www.cons.org/cracauer/sigint.html, this article}. 
Solaris 10 @command{/bin/sh} automatically traps most signals by default; the shell still exits with error upon termination by one of those signals, but in such a case the exit status might be somewhat unexpected (even if allowed by POSIX, strictly speaking): @c FIXME: We had a reference for this behavior but the website no longer @c exists and the page is not in the Internet Archive. --zw 2020-07-10. @example $ @kbd{bash -c 'kill -1 $$'; echo $?} # Will exit 128 + (signal number). Hangup 129 $ @kbd{/bin/ksh -c 'kill -15 $$'; echo $?} # Likewise. Terminated 143 $ @kbd{for sig in 1 2 3 15; do} > @kbd{ echo $sig:} > @kbd{ /bin/sh -c "kill -$s \$\$"; echo $?} > @kbd{done} signal 1: Hangup 129 signal 2: 208 signal 3: 208 signal 15: 208 @end example This gets even worse if one is using the POSIX ``wait'' interface to get details about the shell process terminations: it will result in the shell having exited normally, rather than by receiving a signal. @example $ @kbd{cat > foo.c <<'END'} #include <stdio.h> /* for printf */ #include <stdlib.h> /* for system */ #include <sys/wait.h> /* for WIF* macros */ int main(void) @{ int status = system ("kill -15 $$"); printf ("Terminated by signal: %s\n", WIFSIGNALED (status) ? "yes" : "no"); printf ("Exited normally: %s\n", WIFEXITED (status) ? "yes" : "no"); return 0; @} END @c $$ font-lock $ @kbd{cc -o foo foo.c} $ @kbd{./a.out} # On GNU/Linux Terminated by signal: no Exited normally: yes $ @kbd{./a.out} # On Solaris 10 Terminated by signal: yes Exited normally: no @end example Various shells seem to handle @code{SIGQUIT} specially: they ignore it even if it is not blocked, and even if the shell is not running interactively (in fact, even if the shell has no attached tty); among these shells are at least Bash (from version 2 onward), Zsh 4.3.12, Solaris 10 @code{/bin/ksh} and @code{/usr/xpg4/bin/sh}, and AT&T @code{ksh93} (2011). Still, @code{SIGQUIT} seems to be trappable quite portably within all these shells. 
OTOH, some other shells doesn't special-case the handling of @code{SIGQUIT}; among these shells are at least @code{pdksh} 5.2.14, Solaris 10 and NetBSD 5.1 @code{/bin/sh}, and the Almquist Shell 0.5.5.1. Some shells (especially Korn shells and derivatives) might try to propagate to themselves a signal that has killed a child process; this is not a bug, but a conscious design choice (although its overall value might be debatable). The exact details of how this is attained vary from shell to shell. For example, upon running @code{perl -e 'kill 2, $$'}, after the perl process has been interrupted, AT&T @code{ksh93} (2011) will proceed to send itself a @code{SIGINT}, while Solaris 10 @code{/bin/ksh} and @code{/usr/xpg4/bin/sh} will proceed to exit with status 130 (i.e., 128 + 2). In any case, if there is an active trap associated with @code{SIGINT}, those shells will correctly execute it. @c See: <https://www.austingroupbugs.net/view.php?id=51> Some Korn shells, when a child process die due receiving a signal with signal number @var{n}, can leave in @samp{$?} an exit status of 256+@var{n} instead of the more common 128+@var{n}. Observe the difference between AT&T @code{ksh93} (2011) and @code{bash} 4.1.5 on Debian: @example $ @kbd{/bin/ksh -c 'sh -c "kill -1 \$\$"; echo $?'} /bin/ksh: line 1: 7837: Hangup 257 $ @kbd{/bin/bash -c 'sh -c "kill -1 \$\$"; echo $?'} /bin/bash: line 1: 7861 Hangup (sh -c "kill -1 \$\$") 129 @end example @noindent This @command{ksh} behavior is allowed by POSIX, if implemented with due care; see this @uref{https://www.austingroupbugs.net/view.php?id=51, Austin Group discussion} for more background. However, if it is not implemented with proper care, such a behavior might cause problems in some corner cases. To see why, assume we have a ``wrapper'' script like this: @example #!/bin/sh # Ignore some signals in the shell only, not in its child processes. trap : 1 2 13 15 wrapped_command "$@@" ret=$? 
other_command exit $ret @end example @noindent If @command{wrapped_command} is interrupted by a @code{SIGHUP} (which has signal number 1), @code{ret} will be set to 257. Unless the @command{exit} shell builtin is smart enough to understand that such a value can only have originated from a signal, and adjust the final wait status of the shell appropriately, the value 257 will just get truncated to 1 by the closing @code{exit} call, so that a caller of the script will have no way to determine that termination by a signal was involved. Observe the different behavior of AT&T @code{ksh93} (2011) and @code{bash} 4.1.5 on Debian: @example $ @kbd{cat foo.sh} #!/bin/sh sh -c 'kill -1 $$' ret=$? echo $ret exit $ret $ @kbd{/bin/ksh foo.sh; echo $?} foo.sh: line 2: 12479: Hangup 257 1 $ @kbd{/bin/bash foo.sh; echo $?} foo.sh: line 2: 12487 Hangup (sh -c 'kill -1 $$') 129 129 @end example @node File System Conventions @section File System Conventions @cindex File system conventions Autoconf uses shell-script processing extensively, so the file names that it processes should not contain characters that are special to the shell. Special characters include space, tab, newline, NUL, and the following: @example " # $ & ' ( ) * ; < = > ? [ \ ` | @end example Also, file names should not begin with @samp{~} or @samp{-}, and should contain neither @samp{-} immediately after @samp{/} nor @samp{~} immediately after @samp{:}. On Posix-like platforms, directory names should not contain @samp{:}, as this runs afoul of @samp{:} used as the path separator. These restrictions apply not only to the files that you distribute, but also to the absolute file names of your source, build, and destination directories. On some Posix-like platforms, @samp{!} and @samp{^} are special too, so they should be avoided. Posix lets implementations treat leading @file{//} specially, but requires leading @file{///} and beyond to be equivalent to @file{/}. Most Unix variants treat @file{//} like @file{/}. 
However, some treat @file{//} as a ``super-root'' that can provide access to files that are not otherwise reachable from @file{/}. The super-root tradition began with Apollo Domain/OS, which died out long ago, but unfortunately Cygwin has revived it. While @command{autoconf} and friends are usually run on some Posix variety, they can be used on other systems, most notably DOS variants. This impacts several assumptions regarding file names. @noindent For example, the following code: @example case $foo_dir in /*) # Absolute ;; *) foo_dir=$dots$foo_dir ;; esac @end example @noindent fails to properly detect absolute file names on those systems, because they can use a drivespec, and usually use a backslash as directory separator. If you want to be portable to DOS variants (at the price of rejecting valid but oddball Posix file names like @file{a:\b}), you can check for absolute file names like this: @cindex absolute file names, detect @example case $foo_dir in [\\/]* | ?:[\\/]* ) # Absolute ;; *) foo_dir=$dots$foo_dir ;; esac @end example @noindent Make sure you quote the brackets if appropriate and keep the backslash as first character. @xref{case, , Limitations of Shell Builtins}. Also, because the colon is used as part of a drivespec, these systems don't use it as path separator. When creating or accessing paths, you can use the @code{PATH_SEPARATOR} output variable instead. @command{configure} sets this to the appropriate value for the build system (@samp{:} or @samp{;}) when it starts up. File names need extra care as well. While DOS variants that are Posixy enough to run @command{autoconf} (such as DJGPP) are usually able to handle long file names properly, there are still limitations that can seriously break packages. Several of these issues can be easily detected by the @uref{https://@/ftp.gnu.org/@/gnu/@/non-gnu/@/doschk/@/doschk-1.1.tar.gz, doschk} package. 
A short overview follows; problems are marked with SFN/LFN to indicate where they apply: SFN means the issues are only relevant to plain DOS, not to DOS under Microsoft Windows variants, while LFN identifies problems that exist even under Microsoft Windows variants. @table @asis @item No multiple dots (SFN) DOS cannot handle multiple dots in file names. This is an especially important thing to remember when building a portable configure script, as @command{autoconf} uses a .in suffix for template files. This is perfectly OK on Posix variants: @example AC_CONFIG_HEADERS([config.h]) AC_CONFIG_FILES([source.c foo.bar]) AC_OUTPUT @end example @noindent but it causes problems on DOS, as it requires @samp{config.h.in}, @samp{source.c.in} and @samp{foo.bar.in}. To make your package more portable to DOS-based environments, you should use this instead: @example AC_CONFIG_HEADERS([config.h:config.hin]) AC_CONFIG_FILES([source.c:source.cin foo.bar:foobar.in]) AC_OUTPUT @end example @item No leading dot (SFN) DOS cannot handle file names that start with a dot. This is usually not important for @command{autoconf}. @item Case insensitivity (LFN) DOS is case insensitive, so you cannot, for example, have both a file called @samp{INSTALL} and a directory called @samp{install}. This also affects @command{make}; if there's a file called @samp{INSTALL} in the directory, @samp{make install} does nothing (unless the @samp{install} target is marked as PHONY). @item The 8+3 limit (SFN) Because the DOS file system only stores the first 8 characters of the file name and the first 3 of the extension, those must be unique. That means that @file{foobar-part1.c}, @file{foobar-part2.c} and @file{foobar-prettybird.c} all resolve to the same file name (@file{FOOBAR-P.C}). The same goes for @file{foo.bar} and @file{foo.bartender}. The 8+3 limit is not usually a problem under Microsoft Windows, as it uses numeric tails in the short version of file names to make them unique. 
However, a registry setting can turn this behavior off. While this makes it possible to share file trees containing long file names between SFN and LFN environments, it also means the above problem applies there as well. @item Invalid characters (LFN) Some characters are invalid in DOS file names, and should therefore be avoided. In a LFN environment, these are @samp{/}, @samp{\}, @samp{?}, @samp{*}, @samp{:}, @samp{<}, @samp{>}, @samp{|} and @samp{"}. In a SFN environment, other characters are also invalid. These include @samp{+}, @samp{,}, @samp{[} and @samp{]}. @item Invalid names (LFN) Some DOS file names are reserved, and cause problems if you try to use files with those names. These names include @file{CON}, @file{AUX}, @file{COM1}, @file{COM2}, @file{COM3}, @file{COM4}, @file{LPT1}, @file{LPT2}, @file{LPT3}, @file{NUL}, and @file{PRN}. File names are case insensitive, so even names like @file{aux/config.guess} are disallowed. @end table @node Shell Pattern Matching @section Shell Pattern Matching @cindex Shell pattern matching Nowadays portable patterns can use negated character classes like @samp{[!-aeiou]}. The older syntax @samp{[^-aeiou]} is supported by some shells but not others; hence portable scripts should never use @samp{^} as the first character of a bracket pattern. Outside the C locale, patterns like @samp{[a-z]} are problematic since they may match characters that are not lower-case letters. @node Shell Substitutions @section Shell Substitutions @cindex Shell substitutions Contrary to a persistent urban legend, the Bourne shell does not systematically split variables and back-quoted expressions, in particular on the right-hand side of assignments and in the argument of @code{case}. For instance, the following code: @example case "$given_srcdir" in .) top_srcdir="`echo "$dots" | sed 's|/$||'`" ;; *) top_srcdir="$dots$given_srcdir" ;; esac @end example @noindent is more readable when written as: @example case $given_srcdir in .) 
top_srcdir=`echo "$dots" | sed 's|/$||'` ;; *) top_srcdir=$dots$given_srcdir ;; esac @end example @noindent and in fact it is even @emph{more} portable: in the first case of the first attempt, the computation of @code{top_srcdir} is not portable, since not all shells properly understand @code{"`@dots{}"@dots{}"@dots{}`"}, for example Solaris 10 @command{ksh}: @example $ @kbd{foo="`echo " bar" | sed 's, ,,'`"} ksh: : cannot execute ksh: bar | sed 's, ,,': cannot execute @end example @noindent Posix does not specify behavior for this sequence. On the other hand, behavior for @code{"`@dots{}\"@dots{}\"@dots{}`"} is specified by Posix, but in practice, not all shells understand it the same way: pdksh 5.2.14 prints spurious quotes when in Posix mode: @example $ @kbd{echo "`echo \"hello\"`"} hello $ @kbd{set -o posix} $ @kbd{echo "`echo \"hello\"`"} "hello" @end example @noindent There is just no portable way to use double-quoted strings inside double-quoted back-quoted expressions (pfew!). Bash 4.1 has a bug where quoted empty strings adjacent to unquoted parameter expansions are elided during word splitting. Meanwhile, zsh does not perform word splitting except when in Bourne compatibility mode. In the example below, the correct behavior is to have five arguments to the function, and exactly two spaces on either side of the middle @samp{-}, since word splitting collapses multiple spaces in @samp{$f} but leaves empty arguments intact. @example $ @kbd{bash -c 'n() @{ echo "$#$@@"; @}; f=" - "; n - ""$f"" -'} 3- - - $ @kbd{ksh -c 'n() @{ echo "$#$@@"; @}; f=" - "; n - ""$f"" -'} 5- - - $ @kbd{zsh -c 'n() @{ echo "$#$@@"; @}; f=" - "; n - ""$f"" -'} 3- - - $ @kbd{zsh -c 'emulate sh;} > @kbd{n() @{ echo "$#$@@"; @}; f=" - "; n - ""$f"" -'} 5- - - @end example @noindent You can work around this by doing manual word splitting, such as using @samp{"$str" $list} rather than @samp{"$str"$list}. 
There are also portability pitfalls with particular expansions: @table @code @item $@@ @cindex @code{"$@@"} One of the most famous shell-portability issues is related to @samp{"$@@"}. When there are no positional arguments, Posix says that @samp{"$@@"} is supposed to be equivalent to nothing, but the original Unix version 7 Bourne shell treated it as equivalent to @samp{""} instead, and this behavior survives in later implementations like Digital Unix 5.0. The traditional way to work around this portability problem is to use @samp{$@{1+"$@@"@}}. Unfortunately this method does not work with Zsh (3.x and 4.x), which is used on Mac OS X@. When emulating the Bourne shell, Zsh performs word splitting on @samp{$@{1+"$@@"@}}: @example zsh $ @kbd{emulate sh} zsh $ @kbd{for i in "$@@"; do echo $i; done} Hello World ! zsh $ @kbd{for i in $@{1+"$@@"@}; do echo $i; done} Hello World ! @end example @noindent Zsh handles plain @samp{"$@@"} properly, but we can't use plain @samp{"$@@"} because of the portability problems mentioned above. One workaround relies on Zsh's ``global aliases'' to convert @samp{$@{1+"$@@"@}} into @samp{"$@@"} by itself: @example test $@{ZSH_VERSION+y@} && alias -g '$@{1+"$@@"@}'='"$@@"' @end example Zsh only recognizes this alias when a shell word matches it exactly; @samp{"foo"$@{1+"$@@"@}} remains subject to word splitting. Since this case always yields at least one shell word, use plain @samp{"$@@"}. A more conservative workaround is to avoid @samp{"$@@"} if it is possible that there may be no positional arguments. For example, instead of: @example cat conftest.c "$@@" @end example you can use this instead: @example case $# in 0) cat conftest.c;; *) cat conftest.c "$@@";; esac @end example Autoconf macros often use the @command{set} command to update @samp{$@@}, so if you are writing shell code intended for @command{configure} you should not assume that the value of @samp{$@@} persists for any length of time. 
@item $@{10@} @cindex positional parameters The 10th, 11th, @dots{} positional parameters can be accessed only after a @code{shift}. The 7th Edition shell reported an error if given @code{$@{10@}}, and Solaris 10 @command{/bin/sh} still acts that way: @example $ @kbd{set 1 2 3 4 5 6 7 8 9 10} $ @kbd{echo $@{10@}} bad substitution @end example Conversely, not all shells obey the Posix rule that when braces are omitted, multiple digits beyond a @samp{$} imply the single-digit positional parameter expansion concatenated with the remaining literal digits. To work around the issue, you must use braces. @example $ @kbd{bash -c 'set a b c d e f g h i j; echo $10 $@{1@}0'} a0 a0 $ @kbd{dash -c 'set a b c d e f g h i j; echo $10 $@{1@}0'} j a0 @end example @item $@{@var{var}:-@var{value}@} @itemx $@{@var{var}:=@var{value}@} @itemx $@{@var{var}:?@var{value}@} @itemx $@{@var{var}:+@var{value}@} @c Info cannot handle ':' in index entries. @ifnotinfo @cindex @code{$@{@var{var}:-@var{value}@}} @cindex @code{$@{@var{var}:=@var{value}@}} @cindex @code{$@{@var{var}:?@var{value}@}} @cindex @code{$@{@var{var}:+@var{value}@}} @end ifnotinfo Old BSD shells, including the Ultrix @code{sh}, don't accept the colon for any shell substitution, and complain and die. Similarly for @code{$@{@var{var}:=@var{value}@}}, @code{$@{@var{var}:?@var{value}@}}, etc. However, all shells that support functions allow the use of colon in shell substitution, and since m4sh requires functions, you can portably use null variable substitution patterns in configure scripts. 
@item $@{@var{var}-@var{value}@} @itemx $@{@var{var}:-@var{value}@} @itemx $@{@var{var}=@var{value}@} @itemx $@{@var{var}:=@var{value}@} @itemx $@{@var{var}?@var{value}@} @itemx $@{@var{var}:?@var{value}@} @itemx $@{@var{var}+@var{value}@} @itemx $@{@var{var}:+@var{value}@} @cindex @code{$@{@var{var}-@var{value}@}} @cindex @code{$@{@var{var}=@var{value}@}} @cindex @code{$@{@var{var}?@var{value}@}} @cindex @code{$@{@var{var}+@var{value}@}} @c Info cannot handle ':' in index entries. @ifnotinfo @cindex @code{$@{@var{var}:-@var{value}@}} @cindex @code{$@{@var{var}:=@var{value}@}} @cindex @code{$@{@var{var}:?@var{value}@}} @cindex @code{$@{@var{var}:+@var{value}@}} @end ifnotinfo When using @samp{$@{@var{var}-@var{value}@}} or similar notations that modify a parameter expansion, Posix requires that @var{value} must be a single shell word, which can contain quoted strings but cannot contain unquoted spaces. If this requirement is not met Solaris 10 @command{/bin/sh} sometimes complains, and anyway the behavior is not portable. @example $ @kbd{/bin/sh -c 'echo $@{a-b c@}'} /bin/sh: bad substitution $ @kbd{/bin/sh -c 'echo $@{a-'\''b c'\''@}'} b c $ @kbd{/bin/sh -c 'echo "$@{a-b c@}"'} b c $ @kbd{/bin/sh -c 'cat <<EOF $@{a-b c@} EOF} b c @end example Most shells treat the special parameters @code{*} and @code{@@} as being unset if there are no positional parameters. However, some shells treat them as being set to the empty string. Posix does not clearly specify either behavior. @example $ @kbd{bash -c 'echo "* is $@{*-unset@}."'} * is unset. $ @kbd{dash -c 'echo "* is $@{*-unset@}."'} * is . @end example According to Posix, if an expansion occurs inside double quotes, then the use of unquoted double quotes within @var{value} is unspecified, and any single quotes become literal characters; in that case, escaping must be done with backslash. 
Likewise, the use of unquoted here-documents is a case where double quotes have unspecified results: @example $ @kbd{/bin/sh -c 'echo "$@{a-"b c"@}"'} /bin/sh: bad substitution $ @kbd{ksh -c 'echo "$@{a-"b c"@}"'} b c $ @kbd{bash -c 'echo "$@{a-"b c"@}"'} b c $ @kbd{/bin/sh -c 'a=; echo $@{a+'\''b c'\''@}'} b c $ @kbd{/bin/sh -c 'a=; echo "$@{a+'\''b c'\''@}"'} 'b c' $ @kbd{/bin/sh -c 'a=; echo "$@{a+\"b c\"@}"'} "b c" $ @kbd{/bin/sh -c 'a=; echo "$@{a+b c@}"'} b c $ @kbd{/bin/sh -c 'cat <<EOF $@{a-"b c"@} EOF'} "b c" $ @kbd{/bin/sh -c 'cat <<EOF $@{a-'b c'@} EOF'} 'b c' $ @kbd{bash -c 'cat <<EOF $@{a-"b c"@} EOF'} b c $ @kbd{bash -c 'cat <<EOF $@{a-'b c'@} EOF'} 'b c' @end example Perhaps the easiest way to work around quoting issues in a manner portable to all shells is to place the results in a temporary variable, then use @samp{$t} as the @var{value}, rather than trying to inline the expression needing quoting. @example $ @kbd{/bin/sh -c 't="b c\"'\''@}\\"; echo "$@{a-$t@}"'} b c"'@}\ $ @kbd{ksh -c 't="b c\"'\''@}\\"; echo "$@{a-$t@}"'} b c"'@}\ $ @kbd{bash -c 't="b c\"'\''@}\\"; echo "$@{a-$t@}"'} b c"'@}\ @end example @item $@{@var{var}=@var{value}@} @cindex @code{$@{@var{var}=@var{value}@}} When using @samp{$@{@var{var}=@var{value}@}} to assign a default value to @var{var}, remember that even though the assignment to @var{var} does not undergo file name expansion, the result of the variable expansion does unless the expansion occurred within double quotes. In particular, when using @command{:} followed by unquoted variable expansion for the side effect of setting a default value, if the final value of @samp{$var} contains any globbing characters (either from @var{value} or from prior contents), the shell has to spend time performing file name expansion and field splitting even though those results will not be used. 
Therefore, it is a good idea to consider double quotes when performing default initialization; while remembering how this impacts any quoting characters appearing in @var{value}. @example $ @kbd{time bash -c ': "$@{a=/usr/bin/*@}"; echo "$a"'} /usr/bin/* real 0m0.005s user 0m0.002s sys 0m0.003s $ @kbd{time bash -c ': $@{a=/usr/bin/*@}; echo "$a"'} /usr/bin/* real 0m0.039s user 0m0.026s sys 0m0.009s $ @kbd{time bash -c 'a=/usr/bin/*; : $@{a=noglob@}; echo "$a"'} /usr/bin/* real 0m0.031s user 0m0.020s sys 0m0.010s $ @kbd{time bash -c 'a=/usr/bin/*; : "$@{a=noglob@}"; echo "$a"'} /usr/bin/* real 0m0.006s user 0m0.002s sys 0m0.003s @end example As with @samp{+} and @samp{-}, @var{value} must be a single shell word, otherwise some shells, such as Solaris 10 @command{/bin/sh} or on Digital Unix V 5.0, die because of a ``bad substitution''. Meanwhile, Posix requires that with @samp{=}, quote removal happens prior to the assignment, and the expansion be the final contents of @var{var} without quoting (and thus subject to field splitting), in contrast to the behavior with @samp{-} passing the quoting through to the final expansion. However, @command{bash} 4.1 does not obey this rule. @example $ @kbd{ksh -c 'echo $@{var-a\ \ b@}'} a b $ @kbd{ksh -c 'echo $@{var=a\ \ b@}'} a b $ @kbd{bash -c 'echo $@{var=a\ \ b@}'} a b @end example Finally, Posix states that when mixing @samp{$@{a=b@}} with regular commands, it is unspecified whether the assignments affect the parent shell environment. 
It is best to perform assignments independently from commands, to avoid the problems demonstrated in this example: @example $ @kbd{bash -c 'x= y=$@{x:=b@} sh -c "echo +\$x+\$y+";echo -$x-'} +b+b+ -b- $ @kbd{/bin/sh -c 'x= y=$@{x:=b@} sh -c "echo +\$x+\$y+";echo -$x-'} ++b+ -- $ @kbd{ksh -c 'x= y=$@{x:=b@} sh -c "echo +\$x+\$y+";echo -$x-'} +b+b+ -- @end example @item $@{@var{var}=@var{value}@} @cindex @code{$@{@var{var}=@var{literal}@}} Solaris 10 @command{/bin/sh} has a frightening bug in its handling of literal assignments. Imagine you need set a variable to a string containing @samp{@}}. This @samp{@}} character confuses Solaris 10 @command{/bin/sh} when the affected variable was already set. This bug can be exercised by running: @example $ @kbd{unset foo} $ @kbd{foo=$@{foo='@}'@}} $ @kbd{echo $foo} @} $ @kbd{foo=$@{foo='@}' # no error; this hints to what the bug is} $ @kbd{echo $foo} @} $ @kbd{foo=$@{foo='@}'@}} $ @kbd{echo $foo} @}@} ^ ugh! @end example It seems that @samp{@}} is interpreted as matching @samp{$@{}, even though it is enclosed in single quotes. The problem doesn't happen using double quotes, or when using a temporary variable holding the problematic string. @item $@{@var{var}=@var{expanded-value}@} @cindex @code{$@{@var{var}=@var{expanded-value}@}} On Ultrix, running @example default="yu,yaa" : $@{var="$default"@} @end example @noindent sets @var{var} to @samp{M-yM-uM-,M-yM-aM-a}, i.e., the 8th bit of each char is set. You don't observe the phenomenon using a simple @samp{echo $var} since apparently the shell resets the 8th bit when it expands $var. Here are two means to make this shell confess its sins: @example $ @kbd{cat -v <<EOF $var EOF} @end example @noindent and @example $ @kbd{set | grep '^var=' | cat -v} @end example One classic incarnation of this bug is: @example default="a b c" : $@{list="$default"@} for c in $list; do echo $c done @end example @noindent You'll get @samp{a b c} on a single line. Why? 
Because there are no spaces in @samp{$list}: there are @samp{M- }, i.e., spaces with the 8th bit set, hence no IFS splitting is performed!!! One piece of good news is that Ultrix works fine with @samp{: $@{list=$default@}}; i.e., if you @emph{don't} quote. The bad news is then that QNX 4.25 then sets @var{list} to the @emph{last} item of @var{default}! The portable way out consists in using a double assignment, to switch the 8th bit twice on Ultrix: @example list=$@{list="$default"@} @end example @noindent @dots{}but beware of the @samp{@}} bug from Solaris 10 (see above). For safety, use: @example test $@{var+y@} || var=@var{@{value@}} @end example @item $@{#@var{var}@} @itemx $@{@var{var}%@var{word}@} @itemx $@{@var{var}%%@var{word}@} @itemx $@{@var{var}#@var{word}@} @itemx $@{@var{var}##@var{word}@} @cindex @code{$@{#@var{var}@}} @cindex @code{$@{@var{var}%@var{word}@}} @cindex @code{$@{@var{var}%%@var{word}@}} @cindex @code{$@{@var{var}#@var{word}@}} @cindex @code{$@{@var{var}##@var{word}@}} Posix requires support for these usages, but they do not work with many traditional shells, e.g., Solaris 10 @command{/bin/sh}. Also, @command{pdksh} 5.2.14 mishandles some @var{word} forms. For example if @samp{$1} is @samp{a/b} and @samp{$2} is @samp{a}, then @samp{$@{1#$2@}} should yield @samp{/b}, but with @command{pdksh} it yields the empty string. @item `@var{commands}` @cindex @code{`@var{commands}`} @cindex Command Substitution Posix requires shells to trim all trailing newlines from command output before substituting it, so assignments like @samp{dir=`echo "$file" | tr a A`} do not work as expected if @samp{$file} ends in a newline. While in general it makes no sense, do not substitute a single builtin with side effects, because Ash 0.2, trying to optimize, does not fork a subshell to perform the command. 
For instance, if you wanted to check that @command{cd} is silent, do not use @samp{test -z "`cd /`"} because the following can happen: @example $ @kbd{pwd} /tmp $ @kbd{test -z "`cd /`" && pwd} / @end example @noindent The result of @samp{foo=`exit 1`} is left as an exercise to the reader. The MSYS shell leaves a stray byte in the expansion of a double-quoted command substitution of a native program, if the end of the substitution is not aligned with the end of the double quote. This may be worked around by inserting another pair of quotes: @example $ @kbd{echo "`printf 'foo\r\n'` bar" > broken} $ @kbd{echo "`printf 'foo\r\n'`"" bar" | cmp - broken} - broken differ: char 4, line 1 @end example Upon interrupt or SIGTERM, some shells may abort a command substitution, replace it with a null string, and wrongly evaluate the enclosing command before entering the trap or ending the script. This can lead to spurious errors: @example $ @kbd{sh -c 'if test `sleep 5; echo hi` = hi; then echo yes; fi'} $ @kbd{^C} sh: test: hi: unexpected operator/operand @end example @noindent You can avoid this by assigning the command substitution to a temporary variable: @example $ @kbd{sh -c 'res=`sleep 5; echo hi` if test "x$res" = xhi; then echo yes; fi'} $ @kbd{^C} @end example @item $(@var{commands}) @cindex @code{$(@var{commands})} This construct is meant to replace @samp{`@var{commands}`}, and it has most of the problems listed under @code{`@var{commands}`}. This construct can be nested while this is impossible to do portably with back quotes. 
Although it is almost universally supported, unfortunately Solaris 10 and earlier releases lack it: @example $ @kbd{showrev -c /bin/sh | grep version} Command version: SunOS 5.10 Generic 142251-02 Sep 2010 $ @kbd{echo $(echo blah)} syntax error: `(' unexpected @end example @noindent nor does IRIX 6.5's Bourne shell: @example $ @kbd{uname -a} IRIX firebird-image 6.5 07151432 IP22 $ @kbd{echo $(echo blah)} $(echo blah) @end example If you do use @samp{$(@var{commands})}, make sure that the commands do not start with a parenthesis, as that would cause confusion with a different notation @samp{$((@var{expression}))} that in modern shells is an arithmetic expression not a command. To avoid the confusion, insert a space between the two opening parentheses. Avoid @var{commands} that contain unbalanced parentheses in here-documents, comments, or case statement patterns, as many shells mishandle them. For example, Bash 3.1, @samp{ksh88}, @command{pdksh} 5.2.14, and Zsh 4.2.6 all mishandle the following valid command: @example echo $(case x in x) echo hello;; esac) @end example @item $((@var{expression})) @cindex @code{$((@var{expression}))} Arithmetic expansion is not portable as some shells (most notably Solaris 10 @command{/bin/sh}) don't support it. Among shells that do support @samp{$(( ))}, not all of them obey the Posix rule that octal and hexadecimal constants must be recognized: @example $ @kbd{bash -c 'echo $(( 010 + 0x10 ))'} 24 $ @kbd{zsh -c 'echo $(( 010 + 0x10 ))'} 26 $ @kbd{zsh -c 'emulate sh; echo $(( 010 + 0x10 ))'} 24 $ @kbd{pdksh -c 'echo $(( 010 + 0x10 ))'} pdksh: 010 + 0x10 : bad number `0x10' $ @kbd{pdksh -c 'echo $(( 010 ))'} 10 @end example When it is available, using arithmetic expansion provides a noticeable speedup in script execution; but testing for support requires @command{eval} to avoid syntax errors. 
The following construct is used by @code{AS_VAR_ARITH} to provide arithmetic computation when all arguments are provided in decimal and without a leading zero, and all operators are properly quoted and appear as distinct arguments: @example if ( eval 'test $(( 1 + 1 )) = 2' ) 2>/dev/null; then eval 'func_arith () @{ func_arith_result=$(( $* )) @}' else func_arith () @{ func_arith_result=`expr "$@@"` @} fi func_arith 1 + 1 foo=$func_arith_result @end example @item ^ @cindex @code{^} quoting Always quote @samp{^}, otherwise traditional shells such as @command{/bin/sh} on Solaris 10 treat this like @samp{|}. @end table @node Assignments @section Assignments @cindex Shell assignments When setting several variables in a row, be aware that the order of the evaluation is undefined. For instance @samp{foo=1 foo=2; echo $foo} gives @samp{1} with Solaris 10 @command{/bin/sh}, but @samp{2} with Bash. You must use @samp{;} to enforce the order: @samp{foo=1; foo=2; echo $foo}. Don't rely on the following to find @file{subdir/program}: @example PATH=subdir$PATH_SEPARATOR$PATH program @end example @noindent as this does not work with Zsh 3.0.6. 
Use something like this instead: @example (PATH=subdir$PATH_SEPARATOR$PATH; export PATH; exec program) @end example Don't rely on the exit status of an assignment: Ash 0.2 does not change the status and propagates that of the last statement: @example $ @kbd{false || foo=bar; echo $?} 1 $ @kbd{false || foo=`:`; echo $?} 0 @end example @noindent and to make things even worse, QNX 4.25 just sets the exit status to 0 in any case: @example $ @kbd{foo=`exit 1`; echo $?} 0 @end example To assign default values, follow this algorithm: @enumerate @item If the default value is a literal and does not contain any closing brace, use: @example : "$@{var='my literal'@}" @end example @item If the default value contains no closing brace, has to be expanded, and the variable being initialized is not intended to be IFS-split (i.e., it's not a list), then use: @example : $@{var="$default"@} @end example @item If the default value contains no closing brace, has to be expanded, and the variable being initialized is intended to be IFS-split (i.e., it's a list), then use: @example var=$@{var="$default"@} @end example @item If the default value contains a closing brace, then use: @example test $@{var+y@} || var="has a '@}'" @end example @end enumerate In most cases @samp{var=$@{var="$default"@}} is fine, but in case of doubt, just use the last form. @xref{Shell Substitutions}, items @samp{$@{@var{var}:-@var{value}@}} and @samp{$@{@var{var}=@var{value}@}} for the rationale. @node Parentheses @section Parentheses in Shell Scripts @cindex Shell parentheses Beware of two opening parentheses in a row, as many shell implementations treat them specially, and Posix says that a portable script cannot use @samp{((} outside the @samp{$((} form used for shell arithmetic. 
In traditional shells, @samp{((cat))} behaves like @samp{(cat)}; but many shells, including Bash and the Korn shell, treat @samp{((cat))} as an arithmetic expression equivalent to @samp{let "cat"}, and may or may not report an error when they detect that @samp{cat} is not a number. As another example, @samp{pdksh} 5.2.14 does not treat the following code as a traditional shell would: @example if ((true) || false); then echo ok fi @end example @noindent To work around this problem, insert a space between the two opening parentheses. There is a similar problem and workaround with @samp{$((}; see @ref{Shell Substitutions}. @node Slashes @section Slashes in Shell Scripts @cindex Shell slashes Unpatched Tru64 5.1 @command{sh} omits the last slash of command-line arguments that contain two trailing slashes: @example $ @kbd{echo / // /// //// .// //.} / / // /// ./ //. $ @kbd{x=//} $ @kbd{eval "echo \$x"} / $ @kbd{set -x} $ @kbd{echo abc | tr -t ab //} + echo abc + tr -t ab / /bc @end example Unpatched Tru64 4.0 @command{sh} adds a slash after @samp{"$var"} if the variable is empty and the second double-quote is followed by a word that begins and ends with slash: @example $ @kbd{sh -xc 'p=; echo "$p"/ouch/'} p= + echo //ouch/ //ouch/ @end example However, our understanding is that patches are available, so perhaps it's not worth worrying about working around these horrendous bugs. @node Special Shell Variables @section Special Shell Variables @cindex Shell variables @cindex Special shell variables Some shell variables should not be used, since they can have a deep influence on the behavior of the shell. In order to recover a sane behavior from the shell, some variables should be unset; M4sh takes care of this and provides fallback values, whenever needed, to cater for a very old @file{/bin/sh} that does not support @command{unset}. (@pxref{Portable Shell, , Portable Shell Programming}). 
As a general rule, shell variable names containing a lower-case letter are safe; you can define and use these variables without worrying about their effect on the underlying system, and without worrying about whether the shell changes them unexpectedly. (The exception is the shell variable @code{status}, as described below.) Here is a list of names that are known to cause trouble. This list is not exhaustive, but you should be safe if you avoid the name @code{status} and names containing only upper-case letters and underscores. @c Alphabetical order, case insensitive, `A' before `a'. @table @code @item ? Not all shells correctly reset @samp{$?} after conditionals (@pxref{if, , Limitations of Shell Builtins}). Not all shells manage @samp{$?} correctly in shell functions (@pxref{Shell Functions}) or in traps (@pxref{trap, , Limitations of Shell Builtins}). Not all shells reset @samp{$?} to zero after an empty command. @example $ @kbd{bash -c 'false; $empty; echo $?'} 0 $ @kbd{zsh -c 'false; $empty; echo $?'} 1 @end example @item _ @evindex _ Many shells reserve @samp{$_} for various purposes, e.g., the name of the last command executed. @item BIN_SH @evindex BIN_SH In Tru64, if @env{BIN_SH} is set to @code{xpg4}, subsidiary invocations of the standard shell conform to Posix. @item CDPATH @evindex CDPATH When this variable is set it specifies a list of directories to search when invoking @code{cd} with a relative file name that did not start with @samp{./} or @samp{../}. Posix 1003.1-2001 says that if a nonempty directory name from @env{CDPATH} is used successfully, @code{cd} prints the resulting absolute file name. Unfortunately this output can break idioms like @samp{abs=`cd src && pwd`} because @code{abs} receives the name twice. Also, many shells do not conform to this part of Posix; for example, @command{zsh} prints the result only if a directory name other than @file{.} was chosen from @env{CDPATH}. 
In practice the shells that have this problem also support @command{unset}, so you can work around the problem as follows: @example (unset CDPATH) >/dev/null 2>&1 && unset CDPATH @end example You can also avoid output by ensuring that your directory name is absolute or anchored at @samp{./}, as in @samp{abs=`cd ./src && pwd`}. Configure scripts use M4sh, which automatically unsets @env{CDPATH} if possible, so you need not worry about this problem in those scripts. @item CLICOLOR_FORCE @evindex CLICOLOR_FORCE When this variable is set, some implementations of tools like @command{ls} attempt to add color to their output via terminal escape sequences, even when the output is not directed to a terminal, and can thus cause spurious failures in scripts. Configure scripts use M4sh, which automatically unsets this variable. @item DUALCASE @evindex DUALCASE In the MKS shell, case statements and file name generation are case-insensitive unless @env{DUALCASE} is nonzero. Autoconf-generated scripts export this variable when they start up. @item ENV @itemx MAIL @itemx MAILPATH @itemx PS1 @itemx PS2 @itemx PS4 @evindex ENV @evindex MAIL @evindex MAILPATH @evindex PS1 @evindex PS2 @evindex PS4 These variables should not matter for shell scripts, since they are supposed to affect only interactive shells. However, at least one shell (the pre-3.0 UWIN Korn shell) gets confused about whether it is interactive, which means that (for example) a @env{PS1} with a side effect can unexpectedly modify @samp{$?}. To work around this bug, M4sh scripts (including @file{configure} scripts) do something like this: @example (unset ENV) >/dev/null 2>&1 && unset ENV MAIL MAILPATH PS1='$ ' PS2='> ' PS4='+ ' @end example @noindent (actually, there is some complication due to bugs in @command{unset}; @pxref{unset, , Limitations of Shell Builtins}). @item FPATH @evindex FPATH The Korn shell uses @env{FPATH} to find shell functions, so avoid @env{FPATH} in portable scripts. 
@env{FPATH} is consulted after @env{PATH}, but you still need to be wary of tests that use @env{PATH} to find whether a command exists, since they might report the wrong result if @env{FPATH} is also set. @item GREP_OPTIONS @evindex GREP_OPTIONS When this variable is set, some implementations of @command{grep} honor these options, even if the options include direction to enable colored output via terminal escape sequences, and the result can cause spurious failures when the output is not directed to a terminal. Configure scripts use M4sh, which automatically unsets this variable. @item IFS @evindex IFS Long ago, shell scripts inherited @env{IFS} from the environment, but this caused many problems so modern shells ignore any environment settings for @env{IFS}. Don't set the first character of @env{IFS} to backslash. Indeed, Bourne shells use the first character (backslash) when joining the components in @samp{"$@@"} and some shells then reinterpret (!)@: the backslash escapes, so you can end up with backspace and other strange characters. The proper value for @env{IFS} (in regular code, not when performing splits) is @samp{@key{SPC}@key{TAB}@key{RET}}. The first character is especially important, as it is used to join the arguments in @samp{$*}; however, note that traditional shells, but also bash-2.04, fail to adhere to this and join with a space anyway. M4sh guarantees that @env{IFS} will have the default value at the beginning of a script, and many macros within autoconf rely on this setting. It is okay to use blocks of shell code that temporarily change the value of @env{IFS} in order to split on another character, but remember to restore it before expanding further macros. 
Unsetting @code{IFS} instead of resetting it to the default sequence is not suggested, since code that tries to save and restore the variable's value will incorrectly reset it to an empty value, thus disabling field splitting: @example unset IFS # default separators used for field splitting save_IFS=$IFS IFS=: # ... IFS=$save_IFS # no field splitting performed @end example @item LANG @itemx LC_ALL @itemx LC_COLLATE @itemx LC_CTYPE @itemx LC_MESSAGES @itemx LC_MONETARY @itemx LC_NUMERIC @itemx LC_TIME @evindex LANG @evindex LC_ALL @evindex LC_COLLATE @evindex LC_CTYPE @evindex LC_MESSAGES @evindex LC_MONETARY @evindex LC_NUMERIC @evindex LC_TIME You should set all these variables to @samp{C} because so much configuration code assumes the C locale and Posix requires that locale environment variables be set to @samp{C} if the C locale is desired; @file{configure} scripts and M4sh do that for you. Export these variables after setting them. @c However, some older, nonstandard @c systems (notably SCO) break if locale environment variables @c are set to @samp{C}, so when running on these systems @c Autoconf-generated scripts unset the variables instead. @item LANGUAGE @evindex LANGUAGE @env{LANGUAGE} is not specified by Posix, but it is a GNU extension that overrides @env{LC_ALL} in some cases, so you (or M4sh) should set it too. @item LC_ADDRESS @itemx LC_IDENTIFICATION @itemx LC_MEASUREMENT @itemx LC_NAME @itemx LC_PAPER @itemx LC_TELEPHONE @evindex LC_ADDRESS @evindex LC_IDENTIFICATION @evindex LC_MEASUREMENT @evindex LC_NAME @evindex LC_PAPER @evindex LC_TELEPHONE These locale environment variables are GNU extensions. They are treated like their Posix brethren (@env{LC_COLLATE}, etc.)@: as described above. @item LINENO @evindex LINENO Most modern shells provide the current line number in @code{LINENO}. Its value is the line number of the beginning of the current command. 
M4sh, and hence Autoconf, attempts to execute @command{configure} with a shell that supports @code{LINENO}. If no such shell is available, it attempts to implement @code{LINENO} with a Sed prepass that replaces each instance of the string @code{$LINENO} (not followed by an alphanumeric character) with the line's number. In M4sh scripts you should execute @code{AS_LINENO_PREPARE} so that these workarounds are included in your script; configure scripts do this automatically in @code{AC_INIT}. You should not rely on @code{LINENO} within @command{eval} or shell functions, as the behavior differs in practice. The presence of a quoted newline within simple commands can alter which line number is used as the starting point for @code{$LINENO} substitutions within that command. Also, the possibility of the Sed prepass means that you should not rely on @code{$LINENO} when quoted, when in here-documents, or when line continuations are used. Subshells should be OK, though. In the following example, lines 1, 9, and 14 are portable, but the other instances of @code{$LINENO} do not have deterministic values: @example @group $ @kbd{cat lineno} echo 1. $LINENO echo "2. $LINENO 3. $LINENO" cat <<EOF 5. $LINENO 6. $LINENO 7. \$LINENO EOF ( echo 9. $LINENO ) eval 'echo 10. $LINENO' eval 'echo 11. $LINENO echo 12. $LINENO' echo 13. '$LINENO' echo 14. $LINENO ' 15.' $LINENO f () @{ echo $1 $LINENO; echo $1 $LINENO @} f 18. echo 19. \ $LINENO @end group @group $ @kbd{bash-3.2 ./lineno} 1. 1 2. 3 3. 3 5. 4 6. 4 7. $LINENO 9. 9 10. 10 11. 12 12. 13 13. $LINENO 14. 14 15. 14 18. 16 18. 17 19. 19 @end group @group $ @kbd{zsh-4.3.4 ./lineno} 1. 1 2. 2 3. 2 5. 4 6. 4 7. $LINENO 9. 9 10. 1 11. 1 12. 2 13. $LINENO 14. 14 15. 14 18. 0 18. 1 19. 19 @end group @group $ @kbd{pdksh-5.2.14 ./lineno} 1. 1 2. 2 3. 2 5. 4 6. 4 7. $LINENO 9. 9 10. 0 11. 0 12. 0 13. $LINENO 14. 14 15. 14 18. 16 18. 17 19. 
19 @end group @group $ @kbd{sed '=' <lineno |} > @kbd{ sed '} > @kbd{ N} > @kbd{ s,$,-,} > @kbd{ t loop} > @kbd{ :loop} > @kbd{ s,^\([0-9]*\)\(.*\)[$]LINENO\([^a-zA-Z0-9_]\),\1\2\1\3,} > @kbd{ t loop} > @kbd{ s,-$,,} > @kbd{ s,^[0-9]*\n,,} > @kbd{ ' |} > @kbd{ sh} 1. 1 2. 2 3. 3 5. 5 6. 6 7. \7 9. 9 10. 10 11. 11 12. 12 13. 13 14. 14 15. 15 18. 16 18. 17 19. 20 @end group @end example In particular, note that @file{config.status} (and any other subsidiary script created by @code{AS_INIT_GENERATED}) might report line numbers relative to the parent script as a result of the potential Sed pass. @item NULLCMD @evindex NULLCMD When executing the command @samp{>foo}, @command{zsh} executes @samp{$NULLCMD >foo} unless it is operating in Bourne shell compatibility mode and the @command{zsh} version is newer than 3.1.6-dev-18. If you are using an older @command{zsh} and forget to set @env{NULLCMD}, your script might be suspended waiting for data on its standard input. @item options @evindex options For @command{zsh} 4.3.10, @env{options} is treated as an associative array even after @code{emulate sh}, so it should not be used. @item PATH_SEPARATOR @evindex PATH_SEPARATOR On DJGPP systems, the @env{PATH_SEPARATOR} environment variable can be set to either @samp{:} or @samp{;} to control the path separator Bash uses to set up certain environment variables (such as @env{PATH}). You can set this variable to @samp{;} if you want @command{configure} to use @samp{;} as a separator; this might be useful if you plan to use non-Posix shells to execute files. @xref{File System Conventions}, for more information about @code{PATH_SEPARATOR}. @item POSIXLY_CORRECT @evindex POSIXLY_CORRECT In the GNU environment, exporting @env{POSIXLY_CORRECT} with any value (even empty) causes programs to try harder to conform to Posix. Autoconf does not directly manipulate this variable, but @command{bash} ties the shell variable @env{POSIXLY_CORRECT} to whether the script is running in Posix mode. 
Therefore, take care when exporting or unsetting this variable, so as not to change whether @command{bash} is in Posix mode. @example $ @kbd{bash --posix -c 'set -o | grep posix} > @kbd{unset POSIXLY_CORRECT} > @kbd{set -o | grep posix'} posix on posix off @end example @item PWD @evindex PWD Posix 1003.1-2001 requires that @command{cd} and @command{pwd} must update the @env{PWD} environment variable to point to the logical name of the current directory, but traditional shells do not support this. This can cause confusion if one shell instance maintains @env{PWD} but a subsidiary and different shell does not know about @env{PWD} and executes @command{cd}; in this case @env{PWD} points to the wrong directory. Use @samp{`pwd`} rather than @samp{$PWD}. @item RANDOM @evindex RANDOM Many shells provide @code{RANDOM}, a variable that returns a different integer each time it is used. Most of the time, its value does not change when it is not used, but on IRIX 6.5 the value changes all the time. This can be observed by using @command{set}. It is common practice to use @code{$RANDOM} as part of a file name, but code shouldn't rely on @code{$RANDOM} expanding to a nonempty string. @item status @evindex status This variable is an alias to @samp{$?} for @code{zsh} (at least 3.1.6), hence read-only. Do not use it. @end table @node Shell Functions @section Shell Functions @cindex Shell Functions Nowadays, it is difficult to find a shell that does not support shell functions at all. However, some differences should be expected. When declaring a shell function, you must include whitespace between the @samp{)} after the function name and the start of the compound expression, to avoid upsetting @command{ksh}. While it is possible to use any compound command, most scripts use @samp{@{@dots{}@}}. 
@example $ @kbd{/bin/sh -c 'a()@{ echo hi;@}; a'} hi $ @kbd{ksh -c 'a()@{ echo hi;@}; a'} ksh: syntax error at line 1: `@}' unexpected $ @kbd{ksh -c 'a() @{ echo hi;@}; a'} hi @end example Inside a shell function, you should not rely on the error status of a subshell if the last command of that subshell was @code{exit} or @code{trap}, as this triggers bugs in zsh 4.x; while Autoconf tries to find a shell that does not exhibit the bug, zsh might be the only shell present on the user's machine. Likewise, the state of @samp{$?} is not reliable when entering a shell function. This has the effect that using a function as the first command in a @command{trap} handler can cause problems. @example $ @kbd{bash -c 'foo() @{ echo $?; @}; trap foo 0; (exit 2); exit 2'; echo $?} 2 2 $ @kbd{ash -c 'foo() @{ echo $?; @}; trap foo 0; (exit 2); exit 2'; echo $?} 0 2 @end example DJGPP bash 2.04 has a bug in that @command{return} from a shell function which also used a command substitution causes a segmentation fault. To work around the issue, you can use @command{return} from a subshell, or @samp{AS_SET_STATUS} as last command in the execution flow of the function (@pxref{Common Shell Constructs}). Not all shells treat shell functions as simple commands impacted by @samp{set -e}, for example with Solaris 10 @command{/bin/sh}: @example $ @kbd{bash -c 'f() @{ return 1; @}; set -e; f; echo oops'} $ @kbd{/bin/sh -c 'f() @{ return 1; @}; set -e; f; echo oops'} oops @end example Shell variables and functions may share the same namespace, for example with Solaris 10 @command{/bin/sh}: @example $ @kbd{f () @{ :; @}; f=; f} f: not found @end example @noindent For this reason, Autoconf (actually M4sh, @pxref{Programming in M4sh}) uses the prefix @samp{as_fn_} for its functions. Handling of positional parameters and shell options varies among shells. For example, Korn shells reset and restore trace output (@samp{set -x}) and other options upon function entry and exit. 
Inside a function, IRIX sh sets @samp{$0} to the function name. It is not portable to pass temporary environment variables to shell functions. Solaris 10 @command{/bin/sh} does not see the variable. Meanwhile, not all shells follow the Posix rule that the assignment must affect the current environment in the same manner as special built-ins. @example $ @kbd{/bin/sh -c 'func() @{ echo $a;@}; a=1 func; echo $a'} @result{} @result{} $ @kbd{ash -c 'func() @{ echo $a;@}; a=1 func; echo $a'} @result{}1 @result{} $ @kbd{bash -c 'set -o posix; func() @{ echo $a;@}; a=1 func; echo $a'} @result{}1 @result{}1 @end example Some ancient Bourne shell variants with function support did not reset @samp{$@var{i}, @var{i} >= 0}, upon function exit, so effectively the arguments of the script were lost after the first function invocation. It is probably not worth worrying about these shells any more. With AIX sh, a @command{trap} on 0 installed in a shell function triggers at function exit rather than at script exit. @xref{trap, , Limitations of Shell Builtins}. @node Limitations of Builtins @section Limitations of Shell Builtins @cindex Shell builtins @cindex Limitations of shell builtins No, no, we are serious: some shells do have limitations! :) You should always keep in mind that any builtin or command may support options, and therefore differ in behavior with arguments starting with a dash. For instance, even the innocent @samp{echo "$word"} can give unexpected results when @code{word} starts with a dash. It is often possible to avoid this problem using @samp{echo "x$word"}, taking the @samp{x} into account later in the pipe. Many of these limitations can be worked around using M4sh (@pxref{Programming in M4sh}). @c This table includes things like `@command{test} (files)', so we can't @c use @table @command. @table @asis @item @command{.} @c -------------- @prindex @command{.} Use @command{.} only with regular files (use @samp{test -f}). Bash 2.03, for instance, chokes on @samp{. 
/dev/null}. Remember that @command{.} uses @env{PATH} if its argument contains no slashes. Also, some shells, including bash 3.2, implicitly append the current directory to this @env{PATH} search, even though Posix forbids it. So if you want to use @command{.} on a file @file{foo} in the current directory, you must use @samp{. ./foo}. Not all shells gracefully handle syntax errors within a sourced file. On one extreme, some non-interactive shells abort the entire script. On the other, @command{zsh} 4.3.10 has a bug where it fails to react to the syntax error. @example $ @kbd{echo 'fi' > syntax} $ @kbd{bash -c '. ./syntax; echo $?'} ./syntax: line 1: syntax error near unexpected token `fi' ./syntax: line 1: `fi' 1 $ @kbd{ash -c '. ./syntax; echo $?'} ./syntax: 1: Syntax error: "fi" unexpected $ @kbd{zsh -c '. ./syntax; echo $?'} ./syntax:1: parse error near `fi' 0 @end example @item @command{!} @c -------------- @prindex @command{!} The Unix version 7 shell did not support negating the exit status of commands with @command{!}, and this feature is still absent from some shells (e.g., Solaris 10 @command{/bin/sh}). Other shells, such as FreeBSD @command{/bin/sh} or @command{ash}, have bugs when using @command{!}: @example $ @kbd{sh -c '! : | :'; echo $?} 1 $ @kbd{ash -c '! : | :'; echo $?} 0 $ @kbd{sh -c '! @{ :; @}'; echo $?} 1 $ @kbd{ash -c '! @{ :; @}'; echo $?} @{: not found Syntax error: "@}" unexpected 2 @end example Shell code like this: @example if ! cmp file1 file2 >/dev/null 2>&1; then echo files differ or trouble fi @end example is therefore not portable in practice. Typically it is easy to rewrite such code, e.g.: @example cmp file1 file2 >/dev/null 2>&1 || echo files differ or trouble @end example More generally, one can always rewrite @samp{! 
@var{command}} as: @example if @var{command}; then (exit 1); else :; fi @end example @item @command{@{...@}} @c -------------------- @prindex @command{@{...@}} Bash 3.2 (and earlier versions) sometimes does not properly set @samp{$?} when failing to write redirected output of a compound command. This problem is most commonly observed with @samp{@{@dots{}@}}; it does not occur with @samp{(@dots{})}. For example: @example $ @kbd{bash -c '@{ echo foo; @} >/bad; echo $?'} bash: line 1: /bad: Permission denied 0 $ @kbd{bash -c 'while :; do echo; done >/bad; echo $?'} bash: line 1: /bad: Permission denied 0 @end example To work around the bug, prepend @samp{:;}: @example $ @kbd{bash -c ':;@{ echo foo; @} >/bad; echo $?'} bash: line 1: /bad: Permission denied 1 @end example Posix requires a syntax error if a brace list has no contents. However, not all shells obey this rule; and on shells where empty lists are permitted, the effect on @samp{$?} is inconsistent. To avoid problems, ensure that a brace list is never empty. @example $ @kbd{bash -c 'false; @{ @}; echo $?' || echo $?} bash: line 1: syntax error near unexpected token `@}' bash: line 1: `false; @{ @}; echo $?' 2 $ @kbd{zsh -c 'false; @{ @}; echo $?' || echo $?} 1 $ @kbd{pdksh -c 'false; @{ @}; echo $?' || echo $?} 0 @end example @item @command{break} @c ------------------ @prindex @command{break} The use of @samp{break 2} etc.@: is safe. @anchor{case} @item @command{case} @c ----------------- @prindex @command{case} You don't need to quote the argument; no splitting is performed. You don't need the final @samp{;;}, but you should use it. Posix requires support for @code{case} patterns with opening parentheses like this: @example case $file_name in (*.c) echo "C source code";; esac @end example @noindent but the @code{(} in this example is not portable to a few obsolescent Bourne shell implementations, which is a pity for those of us using tools that rely on balanced parentheses. 
For instance, with Solaris 10 @command{/bin/sh}: @example $ @kbd{case foo in (foo) echo foo;; esac} @error{}syntax error: `(' unexpected @end example @noindent The leading @samp{(} can be omitted safely. Unfortunately, there are contexts where unbalanced parentheses cause other problems, such as when using a syntax-highlighting editor that searches for the balancing counterpart, or more importantly, when using a case statement as an underquoted argument to an Autoconf macro. @xref{Balancing Parentheses}, for trade-offs involved in various styles of dealing with unbalanced @samp{)}. Zsh handles pattern fragments derived from parameter expansions or command substitutions as though quoted: @example $ pat=\?; case aa in ?$pat) echo match;; esac $ pat=\?; case a? in ?$pat) echo match;; esac match @end example @noindent Because of a bug in its @code{fnmatch}, Bash fails to properly handle backslashes in character classes: @example bash-2.02$ @kbd{case /tmp in [/\\]*) echo OK;; esac} bash-2.02$ @end example @noindent This is extremely unfortunate, since you are likely to use this code to handle Posix or MS-DOS absolute file names. To work around this bug, always put the backslash first: @example bash-2.02$ @kbd{case '\TMP' in [\\/]*) echo OK;; esac} OK bash-2.02$ @kbd{case /tmp in [\\/]*) echo OK;; esac} OK @end example Many Bourne shells cannot handle closing brackets in character classes correctly. Some shells also have problems with backslash escaping in case you do not want to match the backslash: both a backslash and the escaped character match this pattern. 
To work around this, specify the character class in a variable, so that quote removal does not apply afterwards, and the special characters don't have to be backslash-escaped: @example $ @kbd{case '\' in [\<]) echo OK;; esac} OK $ @kbd{scanset='[<]'; case '\' in $scanset) echo OK;; esac} $ @end example Even with this, Solaris @command{ksh} matches a backslash if the set contains any of the characters @samp{|}, @samp{&}, @samp{(}, or @samp{)}. Conversely, Tru64 @command{ksh} (circa 2003) erroneously always matches a closing parenthesis if not specified in a character class: @example $ @kbd{case foo in *\)*) echo fail ;; esac} fail $ @kbd{case foo in *')'*) echo fail ;; esac} fail @end example Some shells, such as Ash 0.3.8, are confused by an empty @code{case}/@code{esac}: @example ash-0.3.8 $ @kbd{case foo in esac;} @error{}Syntax error: ";" unexpected (expecting ")") @end example Posix requires @command{case} to give an exit status of 0 if no cases match. However, @command{/bin/sh} in Solaris 10 does not obey this rule. Meanwhile, it is unclear whether a case that matches, but contains no statements, must also change the exit status to 0. The M4sh macro @code{AS_CASE} works around these inconsistencies. @example $ @kbd{bash -c 'case `false` in ?) ;; esac; echo $?'} 0 $ @kbd{/bin/sh -c 'case `false` in ?) ;; esac; echo $?'} 255 @end example @item @command{cd} @c --------------- @prindex @command{cd} Posix 1003.1-2001 requires that @command{cd} must support the @option{-L} (``logical'') and @option{-P} (``physical'') options, with @option{-L} being the default. However, traditional shells do not support these options, and their @command{cd} command has the @option{-P} behavior. Portable scripts should assume neither option is supported, and should assume neither behavior is the default. 
This can be a bit tricky, since the Posix default behavior means that, for example, @samp{ls ..} and @samp{cd ..} may refer to different directories if the current logical directory is a symbolic link. It is safe to use @code{cd @var{dir}} if @var{dir} contains no @file{..} components. Also, Autoconf-generated scripts check for this problem when computing variables like @code{ac_top_srcdir} (@pxref{Configuration Actions}), so it is safe to @command{cd} to these variables. Posix states that behavior is undefined if @command{cd} is given an explicit empty argument. Some shells do nothing, some change to the first entry in @env{CDPATH}, some change to @env{HOME}, and some exit the shell rather than returning an error. Unfortunately, this means that if @samp{$var} is empty, then @samp{cd "$var"} is less predictable than @samp{cd $var} (at least the latter is well-behaved in all shells at changing to @env{HOME}, although this is probably not what you wanted in a script). You should check that a directory name was supplied before trying to change locations. @xref{Special Shell Variables}, for portability problems involving @command{cd} and the @env{CDPATH} environment variable. Also please see the discussion of the @command{pwd} command. @anchor{echo} @item @command{echo} @c ----------------- @prindex @command{echo} The simple @command{echo} is probably the most surprising source of portability troubles. It is not possible to use @samp{echo} portably unless both options and escape sequences are omitted. Don't expect any option. Do not use backslashes in the arguments, as there is no consensus on their handling. For @samp{echo '\n' | wc -l}, the @command{sh} of Solaris 10 outputs 2, but Bash and Zsh (in @command{sh} emulation mode) output 1. The problem is truly @command{echo}: all the shells understand @samp{'\n'} as the string composed of a backslash and an @samp{n}. 
Within a command substitution, @samp{echo 'string\c'} will mess up the internal state of ksh88 on AIX 6.1 so that it will print the first character @samp{s} only, followed by a newline, and then entirely drop the output of the next echo in a command substitution. Because of these problems, do not pass a string containing arbitrary characters to @command{echo}. For example, @samp{echo "$foo"} is safe only if you know that @var{foo}'s value cannot contain backslashes and cannot start with @samp{-}. Normally, @command{printf} is safer and easier to use than @command{echo} and @command{echo -n}. Thus, you should use @command{printf "%s\n"} instead of @command{echo}, and similarly use @command{printf %s} instead of @command{echo -n}. Older scripts, written before @command{printf} was portable, sometimes used a here-document as a safer alternative to @command{echo}, like this: @example cat <<EOF $foo EOF @end example @item @command{eval} @c ----------------- @prindex @command{eval} The @command{eval} command is useful in limited circumstances, e.g., using commands like @samp{eval table_$key=\$value} and @samp{eval value=\$table_$key} to simulate a hash table when the key is known to be alphanumeric. You should also be wary of common bugs in @command{eval} implementations. In some shell implementations (e.g., older @command{ash}, OpenBSD 3.8 @command{sh}, @command{pdksh} v5.2.14 99/07/13.2, and @command{zsh} 4.2.5), the arguments of @samp{eval} are evaluated in a context where @samp{$?} is 0, so they exhibit behavior like this: @example $ @kbd{false; eval 'echo $?'} 0 @end example The correct behavior here is to output a nonzero value, but portable scripts should not rely on this. You should not rely on @code{LINENO} within @command{eval}. @xref{Special Shell Variables}. Note that, even though these bugs are easily avoided, @command{eval} is tricky to use on arbitrary arguments.
It is obviously unwise to use @samp{eval $cmd} if the string value of @samp{cmd} was derived from an untrustworthy source. But even if the string value is valid, @samp{eval $cmd} might not work as intended, since it causes field splitting and file name expansion to occur twice, once for the @command{eval} and once for the command itself. It is therefore safer to use @samp{eval "$cmd"}. For example, if @var{cmd} has the value @samp{cat test?.c}, @samp{eval $cmd} might expand to the equivalent of @samp{cat test;.c} if there happens to be a file named @file{test;.c} in the current directory; and this in turn mistakenly attempts to invoke @command{cat} on the file @file{test} and then execute the command @command{.c}. To avoid this problem, use @samp{eval "$cmd"} rather than @samp{eval $cmd}. However, suppose that you want to output the text of the evaluated command just before executing it. Assuming the previous example, @samp{echo "Executing: $cmd"} outputs @samp{Executing: cat test?.c}, but this output doesn't show the user that @samp{test;.c} is the actual name of the copied file. Conversely, @samp{eval "echo Executing: $cmd"} works on this example, but it fails with @samp{cmd='cat foo >bar'}, since it mistakenly replaces the contents of @file{bar} by the string @samp{cat foo}. No simple, general, and portable solution to this problem is known. @item @command{exec} @c ----------------- @prindex @command{exec} Posix describes several categories of shell built-ins. Special built-ins (such as @command{exit}) must impact the environment of the current shell, and need not be available through @command{exec}. All other built-ins are regular, and must not propagate variable assignments to the environment of the current shell. 
However, the group of regular built-ins is further distinguished by commands that do not require a @env{PATH} search (such as @command{cd}), in contrast to built-ins that are offered as a more efficient version of something that must still be found in a @env{PATH} search (such as @command{echo}). Posix is not clear on whether @command{exec} must work with the list of 17 utilities that are invoked without a @env{PATH} search, and many platforms lack an executable for some of those built-ins: @example $ @kbd{sh -c 'exec cd /tmp'} sh: line 0: exec: cd: not found @end example All other built-ins that provide utilities specified by Posix must have a counterpart executable that exists on @env{PATH}, although Posix allows @command{exec} to use the built-in instead of the executable. For example, contrast @command{bash} 3.2 and @command{pdksh} 5.2.14: @example $ @kbd{bash -c 'pwd --version' | head -n1} bash: line 0: pwd: --: invalid option pwd: usage: pwd [-LP] $ @kbd{bash -c 'exec pwd --version' | head -n1} pwd (GNU coreutils) 6.10 $ @kbd{pdksh -c 'exec pwd --version' | head -n1} pdksh: pwd: --: unknown option @end example When it is desired to avoid a regular shell built-in, the workaround is to use some other forwarding command, such as @command{env} or @command{nice}, that will ensure a path search: @example $ @kbd{pdksh -c 'exec true --version' | head -n1} $ @kbd{pdksh -c 'nice true --version' | head -n1} true (GNU coreutils) 6.10 $ @kbd{pdksh -c 'env true --version' | head -n1} true (GNU coreutils) 6.10 @end example @item @command{exit} @c ----------------- @prindex @command{exit} The default value of @command{exit} is supposed to be @code{$?}; unfortunately, some shells, such as the DJGPP port of Bash 2.04, just perform @samp{exit 0}. @example bash-2.04$ @kbd{foo=`exit 1` || echo fail} fail bash-2.04$ @kbd{foo=`(exit 1)` || echo fail} fail bash-2.04$ @kbd{foo=`(exit 1); exit` || echo fail} bash-2.04$ @end example Using @samp{exit $?} restores the expected behavior. 
Some shell scripts, such as those generated by @command{autoconf}, use a trap to clean up before exiting. If the last shell command exited with nonzero status, the trap also exits with nonzero status so that the invoker can tell that an error occurred. Unfortunately, in some shells, such as Solaris 10 @command{/bin/sh}, an exit trap ignores the @code{exit} command's argument. In these shells, a trap cannot determine whether it was invoked by plain @code{exit} or by @code{exit 1}. Instead of calling @code{exit} directly, use the @code{AC_MSG_ERROR} macro that has a workaround for this problem. @anchor{export} @item @command{export} @c ------------------- @prindex @command{export} The builtin @command{export} dubs a shell variable @dfn{environment variable}. Each update of exported variables corresponds to an update of the environment variables. Conversely, each environment variable received by the shell when it is launched should be imported as a shell variable marked as exported. Alas, many shells, such as Solaris 10 @command{/bin/sh}, IRIX 6.3, IRIX 5.2, AIX 4.1.5, and Digital Unix 4.0, forget to @command{export} the environment variables they receive. As a result, two variables coexist: the environment variable and the shell variable. The following code demonstrates this failure: @example #!/bin/sh echo $FOO FOO=bar echo $FOO exec /bin/sh $0 @end example @noindent when run with @samp{FOO=foo} in the environment, these shells print alternately @samp{foo} and @samp{bar}, although they should print only @samp{foo} and then a sequence of @samp{bar}s. Therefore you should @command{export} again each environment variable that you update; the export can occur before or after the assignment. Posix is not clear on whether the @command{export} of an undefined variable causes the variable to be defined with the value of an empty string, or merely marks any future definition of a variable by that name for export. 
Various shells behave differently in this regard: @example $ @kbd{sh -c 'export foo; env | grep foo'} $ @kbd{ash -c 'export foo; env | grep foo'} foo= @end example Posix requires @command{export} to honor assignments made as arguments, but older shells do not support this, including @command{/bin/sh} in Solaris 10. Portable scripts should separate assignments and exports into different statements. @example $ @kbd{bash -c 'export foo=bar; echo $foo'} bar $ @kbd{/bin/sh -c 'export foo=bar; echo $foo'} /bin/sh: foo=bar: is not an identifier $ @kbd{/bin/sh -c 'export foo; foo=bar; echo $foo'} bar @end example Posix requires @command{export} to work with any arbitrary value for the contents of the variable being exported, as long as the total size of the environment combined with arguments doesn't exceed @code{ARG_MAX} when executing a child process. However, some shells have extensions that involve interpreting some environment values specially, regardless of the variable name. We currently know of one case: all versions of Bash released prior to 27 September 2014 interpret an environment variable with an initial content substring of @code{() @{} as an exported function definition (this is the ``Shellshock'' remote execution bug, CVE-2014-6271 and friends, where it was possible to exploit the function parser to cause remote code execution on child bash startup; newer versions of Bash use special environment variable @emph{names} instead of values to implement the same feature). There may be entries inherited into the environment that are not valid as shell variable names; Posix states that processes should be tolerant of these names. Some shells such as @command{dash} do this by removing those names from the environment at startup, while others such as @command{bash} hide the entry from shell access but still pass it on to child processes. 
While you can set such names using @command{env} for a direct child process, you cannot rely on them being preserved through an intermediate pass through the shell. @item @command{false} @c ------------------ @prindex @command{false} Don't expect @command{false} to exit with status 1: in native Solaris @file{/bin/false} exits with status 255. @item @command{for} @c ---------------- @prindex @command{for} To loop over positional arguments, use: @example for arg do echo "$arg" done @end example @noindent You may @emph{not} leave the @code{do} on the same line as @code{for}, since some shells improperly grok: @example for arg; do echo "$arg" done @end example If you want to explicitly refer to the positional arguments, given the @samp{$@@} bug (@pxref{Shell Substitutions}), use: @example for arg in $@{1+"$@@"@}; do echo "$arg" done @end example @noindent But keep in mind that Zsh, even in Bourne shell emulation mode, performs word splitting on @samp{$@{1+"$@@"@}}; see @ref{Shell Substitutions}, item @samp{$@@}, for more. Posix requires support for a @command{for} loop with no list after @code{in}. However, Solaris 10 @command{/bin/sh} treats that as a syntax error. It is possible to work around this by providing any shell word that expands to nothing, or by ignoring an obvious sentinel. @example $ @kbd{/bin/sh -c 'for a in $empty; do echo hi; done'} $ @kbd{/bin/sh -c 'for a in ; do echo hi; done'} /bin/sh: syntax error at line 1: `;' unexpected @end example This syntax problem is most frequently encountered in code that goes through several layers of expansion, such as an m4 macro or makefile variable used as a list body, where the first layer of expansion (m4 or make) can end up expanding to nothing in the version handed to the shell. In the makefile context, one common workaround is to use a shell variable rather than a make variable as the source of the list. 
@example $ @kbd{cat Makefile} list = bad: @@for arg in $(list); do echo $$arg; done good: @@list='$(list)'; for arg in $$list; do echo $$arg; done $ @kbd{make bad 2&>1 | head -n1} sh: syntax error at line 1: `;' unexpected $ @kbd{make bad list='a b'} a b $ @kbd{make good} $ @kbd{make good list='a b'} a b @end example In Solaris 10 @command{/bin/sh}, when the list of arguments of a @command{for} loop starts with @emph{unquoted} tokens looking like variable assignments, the loop is not executed on those tokens: @example $ @kbd{/bin/sh -c 'for v in a=b c=d x e=f; do echo $v; done'} x e=f @end example @noindent Thankfully, quoting the assignment-like tokens, or starting the list with other tokens (including unquoted variable expansion that results in an assignment-like result), avoids the problem, so it is easy to work around: @example $ @kbd{/bin/sh -c 'for v in "a=b"; do echo $v; done'} a=b $ @kbd{/bin/sh -c 'x=a=b; for v in $x c=d; do echo $v; done'} a=b c=d @end example @anchor{if} @item @command{if} @c --------------- @prindex @command{if} Using @samp{!} is not portable. Instead of: @example if ! cmp -s file file.new; then mv file.new file fi @end example @noindent use: @example if cmp -s file file.new; then :; else mv file.new file fi @end example @noindent Or, especially if the @dfn{else} branch is short, you can use @code{||}. In M4sh, the @code{AS_IF} macro provides an easy way to write these kinds of conditionals: @example AS_IF([cmp -s file file.new], [], [mv file.new file]) @end example This is especially useful in other M4 macros, where the @dfn{then} and @dfn{else} branches might be macro arguments. Some very old shells did not reset the exit status from an @command{if} with no @command{else}: @example $ @kbd{if (exit 42); then true; fi; echo $?} 42 @end example @noindent whereas a proper shell should have printed @samp{0}. But this is no longer a portability problem; any shell that supports functions gets it correct. 
However, it explains why some makefiles have lengthy constructs: @example if test -f "$file"; then install "$file" "$dest" else : fi @end example @item @command{printf} @c ------------------ @prindex @command{printf} A format string starting with a @samp{-} can cause problems. Bash interprets it as an option and gives an error. And @samp{--} to mark the end of options is not good in the NetBSD Almquist shell (e.g., 0.4.6) which takes that literally as the format string. Putting the @samp{-} in a @samp{%c} or @samp{%s} is probably easiest: @example printf %s -foo @end example AIX 7.2 @command{sh} mishandles octal escapes in multi-byte locales by treating them as characters instead of bytes. For example, in a locale using the UTF-8 encoding, @samp{printf '\351'} outputs the two bytes C3, A9 (the UTF-8 encoding for U+00E9) instead of the desired single byte E9. To work around the bug, use the C locale. Bash 2.03 mishandles an escape sequence that happens to evaluate to @samp{%}: @example $ @kbd{printf '\045'} bash: printf: `%': missing format character @end example Large outputs may cause trouble. On Solaris 2.5.1 through 10, for example, @file{/usr/bin/printf} is buggy, so when using @command{/bin/sh} the command @samp{printf %010000x 123} normally dumps core. Since @command{printf} is not always a shell builtin, there is a potential speed penalty for using @code{printf '%s\n'} as a replacement for an @command{echo} that does not interpret @samp{\} or leading @samp{-}. With Solaris @command{ksh}, it is possible to use @code{print -r --} for this role instead. @xref{echo, , Limitations of Shell Builtins}, for a discussion of portable alternatives to both @command{printf} and @command{echo}. @item @command{pwd} @c ---------------- @prindex @command{pwd} With modern shells, plain @command{pwd} outputs a ``logical'' directory name, some of whose components may be symbolic links. 
These directory names are in contrast to ``physical'' directory names, whose components are all directories. Posix 1003.1-2001 requires that @command{pwd} must support the @option{-L} (``logical'') and @option{-P} (``physical'') options, with @option{-L} being the default. However, traditional shells do not support these options, and their @command{pwd} command has the @option{-P} behavior. Portable scripts should assume neither option is supported, and should assume neither behavior is the default. Also, on many hosts @samp{/bin/pwd} is equivalent to @samp{pwd -P}, but Posix does not require this behavior and portable scripts should not rely on it. Typically it's best to use plain @command{pwd}. On modern hosts this outputs logical directory names, which have the following advantages: @itemize @bullet @item Logical names are what the user specified. @item Physical names may not be portable from one installation host to another due to network file system gymnastics. @item On modern hosts @samp{pwd -P} may fail due to lack of permissions to some parent directory, but plain @command{pwd} cannot fail for this reason. @end itemize Also please see the discussion of the @command{cd} command. @item @command{read} @c ----------------- @prindex @command{read} No options are portable, not even support @option{-r} (Solaris 10 @command{/bin/sh} for example). Tru64/OSF 5.1 @command{sh} treats @command{read} as a special built-in, so it may exit if input is redirected from a non-existent or unreadable file. @anchor{set} @item @command{set} @c ---------------- @prindex @command{set} With the FreeBSD 6.0 shell, the @command{set} command (without any options) does not sort its output. The @command{set} builtin faces the usual problem with arguments starting with a dash. 
Modern shells such as Bash or Zsh understand @option{--} to specify the end of the options (any argument after @option{--} is a parameter, even @samp{-x} for instance), but many traditional shells (e.g., Solaris 10 @command{/bin/sh}) simply stop option processing as soon as a non-option argument is found. Therefore, use @samp{dummy} or simply @samp{x} to end the option processing, and use @command{shift} to pop it out: @example set x $my_list; shift @end example Avoid @samp{set -}, e.g., @samp{set - $my_list}. Posix no longer requires support for this command, and in traditional shells @samp{set - $my_list} resets the @option{-v} and @option{-x} options, which makes scripts harder to debug. Some nonstandard shells do not recognize more than one option (e.g., @samp{set -e -x} assigns @samp{-x} to the command line). It is better to combine them: @example set -ex @end example @cindex @command{set -e} The @option{-e} option has historically been under-specified, with enough ambiguities to cause numerous differences across various shell implementations; see for example @uref{https://www.in-ulm.de/@/~mascheck/@/various/@/set-e/, this overview}, or @uref{https://www.austingroupbugs.net/@/view.php?id=52, this link}, documenting a change to Posix 2008 to match @command{ksh88} behavior. Note that mixing @code{set -e} and shell functions is asking for surprises: @example set -e doit() @{ rm file echo one @} doit || echo two @end example @noindent According to the recommendation, @samp{one} should always be output regardless of whether the @command{rm} failed, because it occurs within the body of the shell function @samp{doit} invoked on the left side of @samp{||}, where the effects of @samp{set -e} are not enforced. Likewise, @samp{two} should never be printed, since the failure of @command{rm} does not abort the function, such that the status of @samp{doit} is 0. The BSD shell has had several problems with the @option{-e} option. 
Older versions of the BSD shell (circa 1990) mishandled @samp{&&}, @samp{||}, @samp{if}, and @samp{case} when @option{-e} was in effect, causing the shell to exit unexpectedly in some cases. This was particularly a problem with makefiles, and led to circumlocutions like @samp{sh -c 'test -f file || touch file'}, where the seemingly-unnecessary @samp{sh -c '@dots{}'} wrapper works around the bug (@pxref{Failure in Make Rules}). Even relatively-recent versions of the BSD shell (e.g., OpenBSD 3.4) wrongly exit with @option{-e} if the last command within a compound statement fails and is guarded by an @samp{&&} only. For example: @example #! /bin/sh set -e foo='' test -n "$foo" && exit 1 echo one if :; then test -n "$foo" && exit 1 echo two test -n "$foo" && exit 1 fi echo three @end example @noindent does not print @samp{three}. One workaround is to change the last instance of @samp{test -n "$foo" && exit 1} to be @samp{if test -n "$foo"; then exit 1; fi} instead. Another possibility is to warn BSD users not to use @samp{sh -e}. When @samp{set -e} is in effect, a failed command substitution in Solaris 10 @command{/bin/sh} cannot be ignored, even with @samp{||}. @example $ @kbd{/bin/sh -c 'set -e; foo=`false` || echo foo; echo bar'} $ @kbd{bash -c 'set -e; foo=`false` || echo foo; echo bar'} foo bar @end example @noindent Moreover, a command substitution, successful or not, causes this shell to exit from a failing outer command even in presence of an @samp{&&} list: @example $ @kbd{bash -c 'set -e; false `true` && echo notreached; echo ok'} ok $ @kbd{sh -c 'set -e; false `true` && echo notreached; echo ok'} $ @end example Portable scripts should not use @samp{set -e} if @command{trap} is used to install an exit handler. 
This is because Tru64/OSF 5.1 @command{sh} sometimes enters the trap handler with the exit status of the command prior to the one that triggered the errexit handler: @example $ @kbd{sh -ec 'trap '\''echo $?'\'' 0; false'} 0 $ @kbd{sh -c 'set -e; trap '\''echo $?'\'' 0; false'} 1 @end example @noindent Thus, when writing a script in M4sh, rather than trying to rely on @samp{set -e}, it is better to append @samp{|| AS_EXIT} to any statement where it is desirable to abort on failure. @cindex @command{set -b} @cindex @command{set -m} Job control is not provided by all shells, so the use of @samp{set -m} or @samp{set -b} must be done with care. When using @command{zsh} in native mode, asynchronous notification (@samp{set -b}) is enabled by default, and using @samp{emulate sh} to switch to Posix mode does not clear this setting (although asynchronous notification has no impact unless job monitoring is also enabled). Also, @command{zsh} 4.3.10 and earlier have a bug where job control can be manipulated in interactive shells, but not in subshells or scripts. Furthermore, some shells, like @command{pdksh}, fail to treat subshells as interactive, even though the parent shell was. @example $ @kbd{echo $ZSH_VERSION} 4.3.10 $ @kbd{set -m; echo $?} 0 $ @kbd{zsh -c 'set -m; echo $?'} set: can't change option: -m $ @kbd{(set -m); echo $?} set: can't change option: -m 1 $ @kbd{pdksh -ci 'echo $-; (echo $-)'} cim c @end example @cindex @command{set -n} Use of @command{set -n} (typically via @command{sh -n script}) to validate a script is not foolproof. Modern @command{ksh93} tries to be helpful by informing you about better syntax, but switching the script to use the suggested syntax in order to silence the warnings would render the script no longer portable to older shells: @example $ @kbd{ksh -nc '``'} ksh: warning: line 1: `...` obsolete, use $(...) 
0 @end example Furthermore, on ancient hosts, such as SunOS 4, @command{sh -n} could go into an infinite loop; even with that bug fixed, Solaris 8 @command{/bin/sh} takes extremely long to parse large scripts. Autoconf itself uses @command{sh -n} within its testsuite to check that correct scripts were generated, but only after first probing for other shell features (such as @code{test $@{BASH_VERSION+y@}}) that indicate a reasonably fast and working implementation. @item @command{shift} @c ------------------ @prindex @command{shift} Not only is @command{shift}ing a bad idea when there is nothing left to shift, but in addition it is not portable: the shell of MIPS RISC/OS 4.52 refuses to do it. Don't use @samp{shift 2} etc.; while it in the SVR1 shell (1983), it is also absent in many pre-Posix shells. @item @command{source} @c ------------------- @prindex @command{source} This command is not portable, as Posix does not require it; use @command{.} instead. @item @command{test} @c ----------------- @prindex @command{test} The @code{test} program is the way to perform many file and string tests. It is often invoked by the alternate name @samp{[}, but using that name in Autoconf code is asking for trouble since it is an M4 quote character. The @option{-a}, @option{-o}, @samp{(}, and @samp{)} operands are not present in all implementations, and have been marked obsolete by Posix 2008. This is because there are inherent ambiguities in using them. For example, @samp{test "$1" -a "$2"} looks like a binary operator to check whether two strings are both non-empty, but if @samp{$1} is the literal @samp{!}, then some implementations of @command{test} treat it as a negation of the unary operator @option{-a}. Thus, portable uses of @command{test} should never have more than four arguments, and scripts should use shell constructs like @samp{&&} and @samp{||} instead. 
If you combine @samp{&&} and @samp{||} in the same statement, keep in mind that they have equal precedence, so it is often better to parenthesize even when this is redundant. For example: @smallexample # Not portable: test "X$a" = "X$b" -a \ '(' "X$c" != "X$d" -o "X$e" = "X$f" ')' # Portable: test "X$a" = "X$b" && @{ test "X$c" != "X$d" || test "X$e" = "X$f"; @} @end smallexample @command{test} does not process options like most other commands do; for example, it does not recognize the @option{--} argument as marking the end of options. It is safe to use @samp{!} as a @command{test} operator. For example, @samp{if test ! -d foo; @dots{}} is portable even though @samp{if ! test -d foo; @dots{}} is not. @item @command{test} (files) @c ------------------------- To enable @command{configure} scripts to support cross-compilation, they shouldn't do anything that tests features of the build system instead of the host system. But occasionally you may find it necessary to check whether some arbitrary file exists. To do so, use @samp{test -f}, @samp{test -r}, or @samp{test -x}. Do not use @samp{test -e}, because Solaris 10 @command{/bin/sh} lacks it. To test for symbolic links on systems that have them, use @samp{test -h} rather than @samp{test -L}; either form conforms to Posix 1003.1-2001, but older shells like Solaris 8 @code{/bin/sh} support only @option{-h}. For historical reasons, Posix reluctantly allows implementations of @samp{test -x} that will succeed for the root user, even if no execute permissions are present. Furthermore, shells do not all agree on whether Access Control Lists should affect @samp{test -r}, @samp{test -w}, and @samp{test -x}; some shells base test results strictly on the current user id compared to file owner and mode, as if by @code{stat(2)}; while other shells base test results on whether the current user has the given right, even if that right is only granted by an ACL, as if by @code{faccessat(2)}. 
Furthermore, there is a classic time of check to time of use race between any use of @command{test} followed by operating on the just-checked file. Therefore, it is a good idea to write scripts that actually attempt an operation, and are prepared for the resulting failure if permission is denied, rather than trying to avoid an operation based solely on whether @command{test} guessed that it might not be permitted. @item @command{test} (strings) @c --------------------------- Posix says that @samp{test "@var{string}"} succeeds if @var{string} is not null, but this usage is not portable to traditional platforms like Solaris 10 @command{/bin/sh}, which mishandle strings like @samp{!} and @samp{-n}. However, it @emph{is} portable to test if a variable is set to a non-empty value, by using @samp{test $@{var+y@}}, since all known implementations properly distinguish between no arguments and a known-safe string of @samp{y}. Posix also says that @samp{test ! "@var{string}"}, @samp{test -n "@var{string}"} and @samp{test -z "@var{string}"} work with any string, but many shells (such as Solaris 10, AIX 3.2, UNICOS 10.0.0.6, Digital Unix 4, etc.)@: get confused if @var{string} looks like an operator: @example $ @kbd{test -n =} test: argument expected $ @kbd{test ! -n} test: argument expected $ @kbd{test -z ")"; echo $?} 0 @end example Similarly, Posix says that both @samp{test "@var{string1}" = "@var{string2"}} and @samp{test "@var{string1}" != "@var{string2"}} work for any pairs of strings, but in practice this is not true for troublesome strings that look like operators or parentheses, or that begin with @samp{-}. It is best to protect such strings with a leading @samp{X}, e.g., @samp{test "X@var{string}" != X} rather than @samp{test -n "@var{string}"} or @samp{test ! "@var{string}"}. 
It is common to find variations of the following idiom: @example test -n "`echo $ac_feature | sed 's/[-a-zA-Z0-9_]//g'`" && @var{action} @end example @noindent to take an action when a token matches a given pattern. Such constructs should be avoided by using: @example case $ac_feature in *[!-a-zA-Z0-9_]*) @var{action};; esac @end example If the pattern is a complicated regular expression that cannot be expressed as a shell pattern, use something like this instead: @example expr "X$ac_feature" : 'X.*[^-a-zA-Z0-9_]' >/dev/null && @var{action} @end example @samp{expr "X@var{foo}" : "X@var{bar}"} is more robust than @samp{echo "X@var{foo}" | grep "^X@var{bar}"}, because it avoids problems when @samp{@var{foo}} contains backslashes. @anchor{trap} @item @command{trap} @c ----------------- @prindex @command{trap} It is safe to trap at least the signals 1, 2, 13, and 15. You can also trap 0, i.e., have the @command{trap} run when the script ends (either via an explicit @command{exit}, or the end of the script). The trap for 0 should be installed outside of a shell function, or AIX 5.3 @command{/bin/sh} will invoke the trap at the end of this function. Posix says that @samp{trap - 1 2 13 15} resets the traps for the specified signals to their default values, but many common shells (e.g., Solaris 10 @command{/bin/sh}) misinterpret this and attempt to execute a ``command'' named @command{-} when the specified conditions arise. Posix 2008 also added a requirement to support @samp{trap 1 2 13 15} to reset traps, as this is supported by a larger set of shells, but there are still shells like @command{dash} that mistakenly try to execute @command{1} instead of resetting the traps. Therefore, there is no portable workaround, except for @samp{trap - 0}, for which @samp{trap '' 0} is a portable substitute. 
Although Posix is not absolutely clear on this point, it is widely admitted that when entering the trap @samp{$?} should be set to the exit status of the last command run before the trap. The ambiguity can be summarized as: ``when the trap is launched by an @command{exit}, what is the @emph{last} command run: that before @command{exit}, or @command{exit} itself?'' Bash considers @command{exit} to be the last command, while Zsh and Solaris 10 @command{/bin/sh} consider that when the trap is run it is @emph{still} in the @command{exit}, hence it is the previous exit status that the trap receives: @example $ @kbd{cat trap.sh} trap 'echo $?' 0 (exit 42); exit 0 $ @kbd{zsh trap.sh} 42 $ @kbd{bash trap.sh} 0 @end example The portable solution is then simple: when you want to @samp{exit 42}, run @samp{(exit 42); exit 42}, the first @command{exit} being used to set the exit status to 42 for Zsh, and the second to trigger the trap and pass 42 as exit status for Bash. In M4sh, this is covered by using @code{AS_EXIT}. The shell in FreeBSD 4.0 has the following bug: @samp{$?} is reset to 0 by empty lines if the code is inside @command{trap}. @example $ @kbd{trap 'false} echo $?' 0 $ @kbd{exit} 0 @end example @noindent Fortunately, this bug only affects @command{trap}. Several shells fail to execute an exit trap that is defined inside a subshell, when the last command of that subshell is not a builtin. A workaround is to use @samp{exit $?} as the shell builtin. @example $ @kbd{bash -c '(trap "echo hi" 0; /bin/true)'} hi $ @kbd{/bin/sh -c '(trap "echo hi" 0; /bin/true)'} $ @kbd{/bin/sh -c '(trap "echo hi" 0; /bin/true; exit $?)'} hi @end example @noindent Likewise, older implementations of @command{bash} failed to preserve @samp{$?} across an exit trap consisting of a single cleanup command. 
@example $ @kbd{bash -c 'trap "/bin/true" 0; exit 2'; echo $?} 2 $ @kbd{bash-2.05b -c 'trap "/bin/true" 0; exit 2'; echo $?} 0 $ @kbd{bash-2.05b -c 'trap ":; /bin/true" 0; exit 2'; echo $?} 2 @end example Be aware that a trap can be called from any number of places in your script, and therefore the trap handler should not make assumptions about shell state. For some examples, if your script temporarily modifies @env{IFS}, then the trap should include an initialization back to its typical value of space-tab-newline (autoconf does this for generated @file{configure} files). Likewise, if your script changes the current working directory at some point after the trap is installed, then your trap cannot assume which directory it is in, and should begin by changing directories to an absolute path if that is important to the cleanup efforts (autotest does this for generated @file{testsuite} files). @item @command{true} @c ----------------- @prindex @command{true} @c Info cannot handle `:' in index entries. @c @prindex @command{:} Don't worry: as far as we know @command{true} is portable. Nevertheless, it's not always a builtin (e.g., Bash 1.x), and the portable shell community tends to prefer using @command{:}. This has a funny side effect: when asked whether @command{false} is more portable than @command{true} Alexandre Oliva answered: @quotation In a sense, yes, because if it doesn't exist, the shell will produce an exit status of failure, which is correct for @command{false}, but not for @command{true}. @end quotation Remember that even though @samp{:} ignores its arguments, it still takes time to compute those arguments. It is a good idea to use double quotes around any arguments to @samp{:} to avoid time spent in field splitting and file name expansion. 
@anchor{unset} @item @command{unset} @c ------------------ @prindex @command{unset} In some nonconforming shells (e.g., Solaris 10 @command{/bin/ksh} and @command{/usr/xpg4/bin/sh}, NetBSD 5.99.43 sh, or Bash 2.05a), @code{unset FOO} fails when @code{FOO} is not set. This can interfere with @code{set -e} operation. You can use @smallexample FOO=; unset FOO @end smallexample @noindent if you are not sure that @code{FOO} is set. A few ancient shells lack @command{unset} entirely. For some variables such as @code{PS1}, you can use a neutralizing value instead: @smallexample PS1='$ ' @end smallexample Usually, shells that do not support @command{unset} need less effort to make the environment sane, so for example it is not a problem if you cannot unset @command{CDPATH} on those shells. However, Bash 2.01 mishandles @code{unset MAIL} and @code{unset MAILPATH} in some cases and dumps core. So, you should do something like @smallexample ( (unset MAIL) || exit 1) >/dev/null 2>&1 && unset MAIL || : @end smallexample @noindent @xref{Special Shell Variables}, for some neutralizing values. Also, see @ref{export, , Limitations of Builtins}, for the case of environment variables. @item @command{wait} @c ----------------- @prindex @command{wait} The exit status of @command{wait} is not always reliable. @end table @node Limitations of Usual Tools @section Limitations of Usual Tools @cindex Limitations of usual tools The small set of tools you can expect to find on any machine can still include some limitations you should be aware of. @comment Between this list and the list of builtins above, we should @comment mention all the tools in GNU Coding Standards ``Utilities in @comment Makefiles''. @c This table includes things like `@command{expr} (|)', so we can't @c use @table @command. @table @asis @anchor{awk} @item @command{awk} @c ---------------- @prindex @command{awk} Don't leave white space before the opening parenthesis in a user function call. 
Posix does not allow this and GNU Awk rejects it: @example $ @kbd{gawk 'function die () @{ print "Aaaaarg!" @} BEGIN @{ die () @}'} gawk: cmd. line:2: BEGIN @{ die () @} gawk: cmd. line:2: ^ parse error $ @kbd{gawk 'function die () @{ print "Aaaaarg!" @} BEGIN @{ die() @}'} Aaaaarg! @end example Posix says that if a program contains only @samp{BEGIN} actions, and contains no instances of @code{getline}, then the program merely executes the actions without reading input. However, traditional Awk implementations (such as Solaris 10 @command{awk}) read and discard input in this case. Portable scripts can redirect input from @file{/dev/null} to work around the problem. For example: @example awk 'BEGIN @{print "hello world"@}' </dev/null @end example Posix says that in an @samp{END} action, @samp{$NF} (and presumably, @samp{$1}) retain their value from the last record read, if no intervening @samp{getline} occurred. However, some implementations (such as Solaris 10 @samp{/usr/bin/awk}, @samp{nawk}, or Darwin @samp{awk}) reset these variables. A workaround is to use an intermediate variable prior to the @samp{END} block. 
For example: @example $ @kbd{cat end.awk} @{ tmp = $1 @} END @{ print "a", $1, $NF, "b", tmp @} $ @kbd{echo 1 | awk -f end.awk} a b 1 $ @kbd{echo 1 | gawk -f end.awk} a 1 1 b 1 @end example If you want your program to be deterministic, don't depend on @code{for} on arrays: @example $ @kbd{cat for.awk} END @{ arr["foo"] = 1 arr["bar"] = 1 for (i in arr) print i @} $ @kbd{gawk -f for.awk </dev/null} foo bar $ @kbd{nawk -f for.awk </dev/null} bar foo @end example Some Awk implementations, such as HP-UX 11.0's native one, mishandle anchors: @example $ @kbd{echo xfoo | $AWK '/foo|^bar/ @{ print @}'} $ @kbd{echo bar | $AWK '/foo|^bar/ @{ print @}'} bar $ @kbd{echo xfoo | $AWK '/^bar|foo/ @{ print @}'} xfoo $ @kbd{echo bar | $AWK '/^bar|foo/ @{ print @}'} bar @end example @noindent Either do not depend on such patterns (i.e., use @samp{/^(.*foo|bar)/}), or use a simple test to reject such implementations. On @samp{ia64-hp-hpux11.23}, Awk mishandles @code{printf} conversions after @code{%u}: @example $ @kbd{awk 'BEGIN @{ printf "%u %d\n", 0, -1 @}'} 0 0 @end example AIX version 5.2 has an arbitrary limit of 399 on the length of regular expressions and literal strings in an Awk program. Traditional Awk implementations derived from Unix version 7, such as Solaris @command{/bin/awk}, have many limitations and do not conform to Posix. Nowadays @code{AC_PROG_AWK} (@pxref{Particular Programs}) finds you an Awk that doesn't have these problems, but if for some reason you prefer not to use @code{AC_PROG_AWK} you may need to address them. For more detailed descriptions, see @ref{Language History, , @command{awk} language history, gawk, GNU Awk User's Guide}. Traditional Awk does not support multidimensional arrays or user-defined functions. Traditional Awk does not support the @option{-v} option. 
You can use assignments after the program instead, e.g., @code{$AWK '@{print v $1@}' v=x}; however, don't forget that such assignments are not evaluated until they are encountered (e.g., after any @code{BEGIN} action). Traditional Awk does not support the keywords @code{delete} or @code{do}. Traditional Awk does not support the expressions @code{@var{a}?@var{b}:@var{c}}, @code{!@var{a}}, @code{@var{a}^@var{b}}, or @code{@var{a}^=@var{b}}. Traditional Awk does not support the predefined @code{CONVFMT} or @code{ENVIRON} variables. Traditional Awk supports only the predefined functions @code{exp}, @code{index}, @code{int}, @code{length}, @code{log}, @code{split}, @code{sprintf}, @code{sqrt}, and @code{substr}. Traditional Awk @code{getline} is not at all compatible with Posix; avoid it. Traditional Awk has @code{for (i in a) @dots{}} but no other uses of the @code{in} keyword. For example, it lacks @code{if (i in a) @dots{}}. In code portable to both traditional and modern Awk, @code{FS} must be a string containing just one ordinary character, and similarly for the field-separator argument to @code{split}. Traditional Awk has a limit of 99 fields in a record. Since some Awk implementations, like Tru64's, split the input even if you don't refer to any field in the script, to circumvent this problem, set @samp{FS} to an unusual character and use @code{split}. Traditional Awk has a limit of at most 99 bytes in a number formatted by @code{OFMT}; for example, @code{OFMT="%.300e"; print 0.1;} typically dumps core. The original version of Awk had a limit of at most 99 bytes per @code{split} field, 99 bytes per @code{substr} substring, and 99 bytes per run of non-special characters in a @code{printf} format, but these bugs have been fixed on all practical hosts that we know of. HP-UX 11.00 and IRIX 6.5 Awk require that input files have a line length of at most 3070 bytes. 
@item @command{basename} @c --------------------- @prindex @command{basename} Not all hosts have a working @command{basename}. You can use @command{expr} instead. @c AS_BASENAME is to be replaced by a better API. @ignore Not all hosts have a working @command{basename}, and you should instead use @code{AS_BASENAME} (@pxref{Programming in M4sh}), followed by @command{expr} if you need to strip a suffix. For example: @example a=`basename "$aname"` # This is not portable. a=`AS_BASENAME(["$aname"])` # This is more portable. # This is not portable. c=`basename "$cname" .c` # This is more portable. c=`AS_BASENAME(["$cname"])` case $c in ?*.c) c=`expr "X$c" : 'X\(.*\)\.c'`;; esac @end example @end ignore @item @command{cat} @c ---------------- @prindex @command{cat} Don't rely on any option. @item @command{cc} @c --------------- @prindex @command{cc} The command @samp{cc -c foo.c} traditionally produces an object file named @file{foo.o}. Most compilers allow @option{-c} to be combined with @option{-o} to specify a different object file name, but Posix does not require this combination and a few compilers lack support for it. @xref{C Compiler}, for how Autoconf tests for this feature with @code{AC_PROG_CC_C_O}. When a compilation such as @samp{cc -o foo foo.c} fails, some compilers (such as CDS on Reliant Unix) leave a @file{foo.o}. HP-UX @command{cc} doesn't accept @file{.S} files to preprocess and assemble. @samp{cc -c foo.S} appears to succeed, but in fact does nothing. The default executable, produced by @samp{cc foo.c}, can be @itemize @item @file{a.out} -- usual Posix convention. @item @file{b.out} -- i960 compilers (including @command{gcc}). @item @file{a.exe} -- DJGPP port of @command{gcc}. @item @file{a_out.exe} -- GNV @command{cc} wrapper for DEC C on OpenVMS. @item @file{foo.exe} -- various MS-DOS compilers. @end itemize The C compiler's traditional name is @command{cc}, but other names like @command{gcc} are common. 
Posix 1003.1-2001 and 1003.1-2008 specify the name @command{c99}, but older Posix editions specified @command{c89}, future Posix standards will likely specify @command{c11}, and anyway these standard names are rarely used in practice. Typically the C compiler is invoked from makefiles that use @samp{$(CC)}, so the value of the @samp{CC} make variable selects the compiler name. @item @command{chgrp} @itemx @command{chown} @c ------------------- @prindex @command{chgrp} @prindex @command{chown} It is not portable to change a file's group to a group that the owner does not belong to. @item @command{chmod} @c ------------------ @prindex @command{chmod} Avoid usages like @samp{chmod -w file}; use @samp{chmod a-w file} instead, for two reasons. First, plain @option{-w} does not necessarily make the file unwritable, since it does not affect mode bits that correspond to bits in the file mode creation mask. Second, Posix says that the @option{-w} might be interpreted as an implementation-specific option, not as a mode; Posix suggests using @samp{chmod -- -w file} to avoid this confusion, but unfortunately @samp{--} does not work on some older hosts. @item @command{cmp} @c ---------------- @prindex @command{cmp} @command{cmp} performs a raw data comparison of two files, while @command{diff} compares two text files. Therefore, if you might compare DOS files, even if only checking whether two files are different, use @command{diff} to avoid spurious differences due to differences of newline encoding. @item @command{cp} @c --------------- @prindex @command{cp} Avoid the @option{-r} option, since Posix 1003.1-2004 marks it as obsolescent and its behavior on special files is implementation-defined. Use @option{-R} instead. On GNU hosts the two options are equivalent, but on Solaris hosts (for example) @code{cp -r} reads from pipes instead of replicating them. 
AIX 5.3 @code{cp -R} may corrupt its own memory with some directory hierarchies and error out or dump core: @example @kbd{mkdir -p 12345678/12345678/12345678/12345678} @kbd{touch 12345678/12345678/x} @kbd{cp -R 12345678 t} cp: 0653-440 12345678/12345678/: name too long. @end example Some @command{cp} implementations (e.g., BSD/OS 4.2) do not allow trailing slashes at the end of nonexistent destination directories. To avoid this problem, omit the trailing slashes. For example, use @samp{cp -R source /tmp/newdir} rather than @samp{cp -R source /tmp/newdir/} if @file{/tmp/newdir} does not exist. The @option{-f} option is portable nowadays. @cindex timestamp resolution Traditionally, file timestamps had 1-second resolution, and @samp{cp -p} copied the timestamps exactly. However, many modern file systems have timestamps with 1-nanosecond resolution. Unfortunately, some older @samp{cp -p} implementations truncate timestamps when copying files, which can cause the destination file to appear to be older than the source. The exact amount of truncation depends on the resolution of the system calls that @command{cp} uses. Traditionally this was @code{utime}, which has 1-second resolution. Less-ancient @command{cp} implementations such as GNU Core Utilities 5.0.91 (2003) use @code{utimes}, which has 1-microsecond resolution. Modern implementations such as GNU Core Utilities 6.12 (2008) can set timestamps to the full nanosecond resolution, using the modern system calls @code{futimens} and @code{utimensat} when they are available. As of 2011, though, many platforms do not yet fully support these new system calls. Bob Proulx notes that @samp{cp -p} always @emph{tries} to copy ownerships. But whether it actually does copy ownerships or not is a system dependent policy decision implemented by the kernel. If the kernel allows it then it happens. If the kernel does not allow it then it does not happen. It is not something @command{cp} itself has control over. 
In Unix System V any user can chown files to any other user, and System V also has a non-sticky @file{/tmp}. That probably derives from the heritage of System V in a business environment without hostile users. BSD changed this to be a more secure model where only root can @command{chown} files and a sticky @file{/tmp} is used. That undoubtedly derives from the heritage of BSD in a campus environment. GNU/Linux and Solaris by default follow BSD, but can be configured to allow a System V style @command{chown}. On the other hand, HP-UX follows System V, but can be configured to use the modern security model and disallow @command{chown}. Since it is an administrator-configurable parameter you can't use the name of the kernel as an indicator of the behavior. @item @command{date} @c ----------------- @prindex @command{date} Some versions of @command{date} do not recognize special @samp{%} directives, and unfortunately, instead of complaining, they just pass them through, and exit with success: @example $ @kbd{uname -a} OSF1 medusa.sis.pasteur.fr V5.1 732 alpha $ @kbd{date "+%s"} %s @end example @item @command{diff} @c ----------------- @prindex @command{diff} Option @option{-u} is nonportable. Some implementations, such as Tru64's, fail when comparing to @file{/dev/null}. Use an empty file instead. @item @command{dirname} @c -------------------- @prindex @command{dirname} Not all hosts have a working @command{dirname}, and you should instead use @code{AS_DIRNAME} (@pxref{Programming in M4sh}). For example: @example dir=`dirname "$file"` # This is not portable. dir=`AS_DIRNAME(["$file"])` # This is more portable. @end example @item @command{egrep} @c ------------------ @prindex @command{egrep} Although Posix stopped requiring @command{egrep} in 2001, a few traditional hosts (notably Solaris) do not support the Posix replacement @code{grep -E}. Also, some traditional implementations do not work on long input lines. 
To work around these problems, invoke @code{AC_PROG_EGREP} and then use @code{$EGREP}. Portable extended regular expressions should use @samp{\} only to escape characters in the string @samp{$()*+.?[\^@{|}. For example, @samp{\@}} is not portable, even though it typically matches @samp{@}}. The empty alternative is not portable. Use @samp{?} instead. For instance with Digital Unix v5.0: @example > printf "foo\n|foo\n" | $EGREP '^(|foo|bar)$' |foo > printf "bar\nbar|\n" | $EGREP '^(foo|bar|)$' bar| > printf "foo\nfoo|\n|bar\nbar\n" | $EGREP '^(foo||bar)$' foo |bar @end example @command{$EGREP} also suffers the limitations of @command{grep} (@pxref{grep, , Limitations of Usual Tools}). @item @command{expr} @c ----------------- @prindex @command{expr} Not all implementations obey the Posix rule that @samp{--} separates options from arguments; likewise, not all implementations provide the extension to Posix that the first argument can be treated as part of a valid expression rather than an invalid option if it begins with @samp{-}. When performing arithmetic, use @samp{expr 0 + $var} if @samp{$var} might be a negative number, to keep @command{expr} from interpreting it as an option. No @command{expr} keyword starts with @samp{X}, so use @samp{expr X"@var{word}" : 'X@var{regex}'} to keep @command{expr} from misinterpreting @var{word}. Don't use @code{length}, @code{substr}, @code{match} and @code{index}. @item @command{expr} (@samp{|}) @prindex @command{expr} (@samp{|}) You can use @samp{|}. Although Posix does require that @samp{expr ''} return the empty string, it does not specify the result when you @samp{|} together the empty string (or zero) with the empty string. For example: @example expr '' \| '' @end example Posix 1003.2-1992 returns the empty string for this case, but traditional Unix returns @samp{0} (Solaris is one such example). 
In Posix 1003.1-2001, the specification was changed to match traditional Unix's behavior (which is bizarre, but it's too late to fix this). Please note that the same problem does arise when the empty string results from a computation, as in: @example expr bar : foo \| foo : bar @end example @noindent Avoid this portability problem by avoiding the empty string. @item @command{expr} (@samp{:}) @c ---------------------------- @prindex @command{expr} Portable @command{expr} regular expressions should use @samp{\} to escape only characters in the string @samp{$()*.0123456789[\^n@{@}}. For example, alternation, @samp{\|}, is common but Posix does not require its support, so it should be avoided in portable scripts. Similarly, @samp{\+} and @samp{\?} should be avoided. Portable @command{expr} regular expressions should not begin with @samp{^}. Patterns are automatically anchored so leading @samp{^} is not needed anyway. On the other hand, the behavior of the @samp{$} anchor is not portable on multi-line strings. Posix is ambiguous whether the anchor applies to each line, as was done in older versions of the GNU Core Utilities, or whether it applies only to the end of the overall string, as in Coreutils 6.0 and most other implementations. @example $ @kbd{baz='foo} > @kbd{bar'} $ @kbd{expr "X$baz" : 'X\(foo\)$'} $ @kbd{expr-5.97 "X$baz" : 'X\(foo\)$'} foo @end example The Posix standard is ambiguous as to whether @samp{expr 'a' : '\(b\)'} outputs @samp{0} or the empty string. In practice, it outputs the empty string on most platforms, but portable scripts should not assume this. For instance, the QNX 4.25 native @command{expr} returns @samp{0}. One might think that a way to get a uniform behavior would be to use the empty string as a default value: @example expr a : '\(b\)' \| '' @end example @noindent Unfortunately this behaves exactly as the original expression; see the @command{expr} (@samp{|}) entry for more information. 
Some ancient @command{expr} implementations (e.g., SunOS 4 @command{expr} and Solaris 8 @command{/usr/ucb/expr}) have a silly length limit that causes @command{expr} to fail if the matched substring is longer than 120 bytes. In this case, you might want to fall back on @samp{echo|sed} if @command{expr} fails. Nowadays this is of practical importance only for the rare installer who mistakenly puts @file{/usr/ucb} before @file{/usr/bin} in @env{PATH}. On Mac OS X 10.4, @command{expr} mishandles the pattern @samp{[^-]} in some cases. For example, the command @example expr Xpowerpc-apple-darwin8.1.0 : 'X[^-]*-[^-]*-\(.*\)' @end example @noindent outputs @samp{apple-darwin8.1.0} rather than the correct @samp{darwin8.1.0}. This particular case can be worked around by substituting @samp{[^--]} for @samp{[^-]}. Don't leave, there is some more! The QNX 4.25 @command{expr}, in addition to preferring @samp{0} to the empty string, has a funny behavior in its exit status: it's always 1 when parentheses are used! @example $ @kbd{val=`expr 'a' : 'a'`; echo "$?: $val"} 0: 1 $ @kbd{val=`expr 'a' : 'b'`; echo "$?: $val"} 1: 0 $ @kbd{val=`expr 'a' : '\(a\)'`; echo "$?: $val"} 1: a $ @kbd{val=`expr 'a' : '\(b\)'`; echo "$?: $val"} 1: 0 @end example @noindent In practice this can be a big problem if you are ready to catch failures of @command{expr} programs with some other method (such as using @command{sed}), since you may get twice the result. For instance @example $ @kbd{expr 'a' : '\(a\)' || echo 'a' | sed 's/^\(a\)$/\1/'} @end example @noindent outputs @samp{a} on most hosts, but @samp{aa} on QNX 4.25. A simple workaround consists of testing @command{expr} and using a variable set to @command{expr} or to @command{false} according to the result. Tru64 @command{expr} incorrectly treats the result as a number, if it can be interpreted that way: @example $ @kbd{expr 00001 : '.*\(...\)'} 1 @end example On HP-UX 11, @command{expr} only supports a single sub-expression. 
@example $ @kbd{expr 'Xfoo' : 'X\(f\(oo\)*\)$'} expr: More than one '\(' was used. @end example @item @command{fgrep} @c ------------------ @prindex @command{fgrep} Although Posix stopped requiring @command{fgrep} in 2001, a few traditional hosts (notably Solaris) do not support the Posix replacement @code{grep -F}. Also, some traditional implementations do not work on long input lines. To work around these problems, invoke @code{AC_PROG_FGREP} and then use @code{$FGREP}. Tru64/OSF 5.1 @command{fgrep} does not match an empty pattern. @item @command{find} @c ----------------- @prindex @command{find} The @option{-maxdepth} option seems to be GNU specific. Tru64 v5.1, NetBSD 1.5 and Solaris @command{find} commands do not understand it. The replacement of @samp{@{@}} is guaranteed only if the argument is exactly @emph{@{@}}, not if it's only a part of an argument. For instance on DU, and HP-UX 10.20 and HP-UX 11: @example $ @kbd{touch foo} $ @kbd{find . -name foo -exec echo "@{@}-@{@}" \;} @{@}-@{@} @end example @noindent while GNU @command{find} reports @samp{./foo-./foo}. @anchor{grep} @item @command{grep} @c ----------------- @prindex @command{grep} Portable scripts can rely on the @command{grep} options @option{-c}, @option{-l}, @option{-n}, and @option{-v}, but should avoid other options. For example, don't use @option{-w}, as Posix does not require it and Irix 6.5.16m's @command{grep} does not support it. Also, portable scripts should not combine @option{-c} with @option{-l}, as Posix does not allow this. Some of the options required by Posix are not portable in practice. Don't use @samp{grep -q} to suppress output, because traditional @command{grep} implementations (e.g., Solaris) do not support @option{-q}. Don't use @samp{grep -s} to suppress output either, because Posix says @option{-s} does not suppress output, only some error messages; also, the @option{-s} option of traditional @command{grep} behaved like @option{-q} does in most modern implementations. 
Instead, redirect the standard output and standard error (in case the file doesn't exist) of @code{grep} to @file{/dev/null}. Check the exit status of @code{grep} to determine whether it found a match. The QNX4 implementation fails to count lines with @code{grep -c '$'}, but works with @code{grep -c '^'}. Other alternatives for counting lines are to use @code{sed -n '$='} or @code{wc -l}. Some traditional @command{grep} implementations do not work on long input lines. On AIX the default @code{grep} silently truncates long lines on the input before matching. Also, traditional implementations do not support multiple regexps with @option{-e}: they either reject @option{-e} entirely (e.g., Solaris) or honor only the last pattern (e.g., IRIX 6.5 and NeXT). To work around these problems, invoke @code{AC_PROG_GREP} and then use @code{$GREP}. Another possible workaround for the multiple @option{-e} problem is to separate the patterns by newlines, for example: @example grep 'foo bar' in.txt @end example @noindent except that this fails with traditional @command{grep} implementations and with OpenBSD 3.8 @command{grep}. Traditional @command{grep} implementations (e.g., Solaris) do not support the @option{-E} or @option{-F} options. To work around these problems, invoke @code{AC_PROG_EGREP} and then use @code{$EGREP}, and similarly for @code{AC_PROG_FGREP} and @code{$FGREP}. Even if you are willing to require support for Posix @command{grep}, your script should not use both @option{-E} and @option{-F}, since Posix does not allow this combination. Portable @command{grep} regular expressions should use @samp{\} only to escape characters in the string @samp{$()*.0123456789[\^@{@}}. For example, alternation, @samp{\|}, is common but Posix does not require its support in basic regular expressions, so it should be avoided in portable scripts. Solaris and HP-UX @command{grep} do not support it. 
Similarly, the following escape sequences should also be avoided: @samp{\<}, @samp{\>}, @samp{\+}, @samp{\?}, @samp{\`}, @samp{\'}, @samp{\B}, @samp{\b}, @samp{\S}, @samp{\s}, @samp{\W}, and @samp{\w}. Posix does not specify the behavior of @command{grep} on binary files. An example where this matters is using BSD @command{grep} to search text that includes embedded ANSI escape sequences for colored output to terminals (@samp{\033[m} is the sequence to restore normal output); the behavior depends on whether input is seekable: @example $ @kbd{printf 'esc\033[mape\n' > sample} $ @kbd{grep . sample} Binary file sample matches $ @kbd{cat sample | grep .} escape @end example @item @command{join} @c ----------------- @prindex @command{join} Solaris 8 @command{join} has bugs when the second operand is standard input, and when standard input is a pipe. For example, the following shell script causes Solaris 8 @command{join} to loop forever: @example cat >file <<'EOF' 1 x 2 y EOF cat file | join file - @end example Use @samp{join - file} instead. On NetBSD, @command{join -a 1 file1 file2} mistakenly behaves like @command{join -a 1 -a 2 file1 file2}, resulting in a usage warning; the workaround is to use @command{join -a1 file1 file2} instead. @item @command{ln} @c --------------- @prindex @command{ln} The @option{-f} option is portable nowadays. @cindex Symbolic links Symbolic links are not available on some systems; use @samp{$(LN_S)} as a portable substitute. For versions of the DJGPP before 2.04, @command{ln} emulates symbolic links to executables by generating a stub that in turn calls the real program. This feature also works with nonexistent files like in the Posix spec. So @samp{ln -s file link} generates @file{link.exe}, which attempts to call @file{file.exe} if run. But this feature only works for executables, so @samp{cp -p} is used instead for these systems. DJGPP versions 2.04 and later have full support for symbolic links. 
@item @command{ls} @c --------------- @prindex @command{ls} @cindex Listing directories The portable options are @option{-acdilrtu}. Current practice is for @option{-l} to output both owner and group, even though ancient versions of @command{ls} omitted the group. On ancient hosts, @samp{ls foo} sent the diagnostic @samp{foo not found} to standard output if @file{foo} did not exist. Hence a shell command like @samp{sources=`ls *.c 2>/dev/null`} did not always work, since it was equivalent to @samp{sources='*.c not found'} in the absence of @samp{.c} files. This is no longer a practical problem, since current @command{ls} implementations send diagnostics to standard error. The behavior of @command{ls} on a directory that is being concurrently modified is not always predictable, because of a data race where cached information returned by @code{readdir} does not match the current directory state. In fact, MacOS 10.5 has an intermittent bug where @code{readdir}, and thus @command{ls}, sometimes lists a file more than once if other files were added or removed from the directory immediately prior to the @command{ls} call. Since @command{ls} already sorts its output, the duplicate entries can be avoided by piping the results through @code{uniq}. @anchor{mkdir} @item @command{mkdir} @c ------------------ @prindex @command{mkdir} @cindex Making directories No @command{mkdir} option is portable to older systems. Instead of @samp{mkdir -p @var{file-name}}, you should use @code{AS_MKDIR_P(@var{file-name})} (@pxref{Programming in M4sh}) or @code{AC_PROG_MKDIR_P} (@pxref{Particular Programs}). Combining the @option{-m} and @option{-p} options, as in @samp{mkdir -m go-w -p @var{dir}}, often leads to trouble. FreeBSD @command{mkdir} incorrectly attempts to change the permissions of @var{dir} even if it already exists. HP-UX 11.23 and IRIX 6.5 @command{mkdir} often assign the wrong permissions to any newly-created parents of @var{dir}. 
Posix does not clearly specify whether @samp{mkdir -p foo} should succeed when @file{foo} is a symbolic link to an already-existing directory. The GNU Core Utilities 5.1.0 @command{mkdir} succeeds, but Solaris @command{mkdir} fails. Traditional @code{mkdir -p} implementations suffer from race conditions. For example, if you invoke @code{mkdir -p a/b} and @code{mkdir -p a/c} at the same time, both processes might detect that @file{a} is missing, one might create @file{a}, then the other might try to create @file{a} and fail with a @code{File exists} diagnostic. The GNU Core Utilities (@samp{fileutils} version 4.1), FreeBSD 5.0, NetBSD 2.0.2, and OpenBSD 2.4 are known to be race-free when two processes invoke @code{mkdir -p} simultaneously, but earlier versions are vulnerable. Solaris @command{mkdir} is still vulnerable as of Solaris 10, and other traditional Unix systems are probably vulnerable too. This possible race is harmful in parallel builds when several Make rules call @code{mkdir -p} to construct directories. You may use @code{install-sh -d} as a safe replacement, provided this script is recent enough; the copy shipped with Autoconf 2.60 and Automake 1.10 is OK, but copies from older versions are vulnerable. @item @command{mkfifo} @itemx @command{mknod} @c ------------------- @prindex @command{mkfifo} @prindex @command{mknod} The GNU Coding Standards state that @command{mknod} is safe to use on platforms where it has been tested to exist; but it is generally portable only for creating named FIFOs, since device numbers are platform-specific. Autotest uses @command{mkfifo} to implement parallel testsuites. Posix states that behavior is unspecified when opening a named FIFO for both reading and writing; on at least Cygwin, this results in failure on any attempt to read or write to that file descriptor. 
@item @command{mktemp} @c ------------------- @prindex @command{mktemp} @cindex Creating temporary files Shell scripts can use temporary files safely with @command{mktemp}, but it does not exist on all systems. A portable way to create a safe temporary file name is to create a temporary directory with mode 700 and use a file inside this directory. Both methods prevent attackers from gaining control, though @command{mktemp} is far less likely to fail gratuitously under attack. Here is sample code to create a new temporary directory @samp{$dir} safely: @example # Create a temporary directory $dir in $TMPDIR (default /tmp). # Use mktemp if possible; otherwise fall back on mkdir, # with $RANDOM to make collisions less likely. : "$@{TMPDIR:=/tmp@}" @{ dir=` (umask 077 && mktemp -d "$TMPDIR/fooXXXXXX") 2>/dev/null ` && test -d "$dir" @} || @{ dir=$TMPDIR/foo$$-$RANDOM @c $$ restore font-lock (umask 077 && mkdir "$dir") @} || exit $? @end example @item @command{mv} @c --------------- @prindex @command{mv} @cindex Moving open files The only portable options are @option{-f} and @option{-i}. Moving individual files between file systems is portable (it was in Unix version 6), but it is not always atomic: when doing @samp{mv new existing}, there's a critical section where neither the old nor the new version of @file{existing} actually exists. On some systems moving files from @file{/tmp} can sometimes cause undesirable (but perfectly valid) warnings, even if you created these files. This is because @file{/tmp} belongs to a group that ordinary users are not members of, and files created in @file{/tmp} inherit the group of @file{/tmp}. When the file is copied, @command{mv} issues a diagnostic without failing: @smallexample $ @kbd{touch /tmp/foo} $ @kbd{mv /tmp/foo .} @error{}mv: ./foo: set owner/group (was: 100/0): Operation not permitted $ @kbd{echo $?} 0 $ @kbd{ls foo} foo @end smallexample @noindent This annoying behavior conforms to Posix, unfortunately. 
Moving directories across mount points is not portable, use @command{cp} and @command{rm}. DOS variants cannot rename or remove open files, and do not support commands like @samp{mv foo bar >foo}, even though this is perfectly portable among Posix hosts. @item @command{od} @c --------------- @prindex @command{od} In MacOS X versions prior to 10.4.3, @command{od} does not support the standard Posix options @option{-A}, @option{-j}, @option{-N}, or @option{-t}, or the XSI option, @option{-s}. The only supported Posix option is @option{-v}, and the only supported XSI options are those in @option{-bcdox}. The BSD @command{hexdump} program can be used instead. In some versions of some operating systems derived from Solaris 11, @command{od} prints decimal byte values padded with zeroes rather than with spaces: @smallexample $ @kbd{printf '#!' | od -A n -t d1 -N 2} 035 033 @end smallexample @noindent instead of @smallexample $ @kbd{printf '#!' | od -A n -t d1 -N 2} 35 33 @end smallexample We have observed this on both OpenIndiana and OmniOS; Illumos may also be affected. As a workaround, you can use octal output (option @code{-t o1}). @item @command{rm} @c --------------- @prindex @command{rm} The @option{-f} and @option{-r} options are portable. It is not portable to invoke @command{rm} without options or operands. On the other hand, Posix now requires @command{rm -f} to silently succeed when there are no operands (useful for constructs like @command{rm -rf $filelist} without first checking if @samp{$filelist} was empty). But this was not always portable; at least NetBSD @command{rm} built before 2008 would fail with a diagnostic. A file might not be removed even if its parent directory is writable and searchable. Many Posix hosts cannot remove a mount point, a named stream, a working directory, or a last link to a file that is being executed. 
DOS variants cannot rename or remove open files, and do not support commands like @samp{rm foo >foo}, even though this is perfectly portable among Posix hosts. @item @command{rmdir} @c ------------------ @prindex @command{rmdir} Just as with @command{rm}, some platforms refuse to remove a working directory. @anchor{sed} @item @command{sed} @c ---------------- @prindex @command{sed} Patterns should not include the separator (unless escaped), even as part of a character class. In conformance with Posix, the Cray @command{sed} rejects @samp{s/[^/]*$//}: use @samp{s%[^/]*$%%}. Even when escaped, patterns should not include separators that are also used as @command{sed} metacharacters. For example, GNU sed 4.0.9 rejects @samp{s,x\@{1\,\@},,}, while sed 4.1 strips the backslash before the comma before evaluating the basic regular expression. Avoid empty patterns within parentheses (i.e., @samp{\(\)}). Posix does not require support for empty patterns, and Unicos 9 @command{sed} rejects them. Unicos 9 @command{sed} loops endlessly on patterns like @samp{.*\n.*}. Sed scripts should not use branch labels longer than 7 characters and should not contain comments; AIX 5.3 @command{sed} rejects indented comments. HP-UX sed has a limit of 99 commands (not counting @samp{:} commands) and 48 labels, which cannot be circumvented by using more than one script file. It can execute up to 19 reads with the @samp{r} command per cycle. Solaris @command{/usr/ucb/sed} rejects usages that exceed a limit of about 6000 bytes for the internal representation of commands. 
Avoid redundant @samp{;}, as some @command{sed} implementations, such as NetBSD 1.4.2's, incorrectly try to interpret the second @samp{;} as a command: @example $ @kbd{echo a | sed 's/x/x/;;s/x/x/'} sed: 1: "s/x/x/;;s/x/x/": invalid command code ; @end example Some @command{sed} implementations have a buffer limited to 4000 bytes, and this limits the size of input lines, output lines, and internal buffers that can be processed portably. Likewise, not all @command{sed} implementations can handle embedded @code{NUL} or a missing trailing newline. Remember that ranges within a bracket expression of a regular expression are only well-defined in the @samp{C} (or @samp{POSIX}) locale. Meanwhile, support for character classes like @samp{[[:upper:]]} is not yet universal, so if you cannot guarantee the setting of @env{LC_ALL}, it is better to spell out a range @samp{[ABCDEFGHIJKLMNOPQRSTUVWXYZ]} than to rely on @samp{[A-Z]}. Additionally, Posix states that regular expressions are only well-defined on characters. Unfortunately, there exist platforms such as MacOS X 10.5 where not all 8-bit byte values are valid characters, even though that platform has a single-byte @samp{C} locale. And Posix allows the existence of a multi-byte @samp{C} locale, although that does not yet appear to be a common implementation. At any rate, it means that not all bytes will be matched by the regular expression @samp{.}: @example $ @kbd{printf '\200\n' | LC_ALL=C sed -n /./p | wc -l} 0 $ @kbd{printf '\200\n' | LC_ALL=en_US.ISO8859-1 sed -n /./p | wc -l} 1 @end example Portable @command{sed} regular expressions should use @samp{\} only to escape characters in the string @samp{$()*.0123456789[\^n@{@}}. For example, alternation, @samp{\|}, is common but Posix does not require its support, so it should be avoided in portable scripts. Solaris @command{sed} does not support alternation; e.g., @samp{sed '/a\|b/d'} deletes only lines that contain the literal string @samp{a|b}. 
Similarly, @samp{\+} and @samp{\?} should be avoided.

Anchors (@samp{^} and @samp{$}) inside groups are not portable.

Nested parentheses in patterns (e.g., @samp{\(\(a*\)b*\)}) are quite
portable to current hosts, but were not supported by some ancient
@command{sed} implementations like SVR3.

Some @command{sed} implementations, e.g., Solaris, restrict the special
role of the asterisk @samp{*} to one-character regular expressions and
back-references, and the special role of interval expressions
@samp{\@{@var{m}\@}}, @samp{\@{@var{m},\@}}, or
@samp{\@{@var{m},@var{n}\@}} to one-character regular expressions.  This
may lead to unexpected behavior:

@example
$ @kbd{echo '1*23*4' | /usr/bin/sed 's/\(.\)*/x/g'}
x2x4
$ @kbd{echo '1*23*4' | /usr/xpg4/bin/sed 's/\(.\)*/x/g'}
x
@end example

The @option{-e} option is mostly portable.  However, its argument cannot
start with @samp{a}, @samp{c}, or @samp{i}, as this runs afoul of a
Tru64 5.1 bug.  Also, its argument cannot be empty, as this fails on
AIX 5.3.  Some people prefer to use @samp{-e}:

@example
sed -e '@var{command-1}' \
    -e '@var{command-2}'
@end example

@noindent
as opposed to the equivalent:

@example
sed '
  @var{command-1}
  @var{command-2}
'
@end example

@noindent
The following usage is sometimes equivalent:

@example
sed '@var{command-1};@var{command-2}'
@end example

but Posix says that this use of a semicolon has undefined effect if
@var{command-1}'s verb is @samp{@{}, @samp{a}, @samp{b}, @samp{c},
@samp{i}, @samp{r}, @samp{t}, @samp{w}, @samp{:}, or @samp{#}, so you
should use semicolon only with simple scripts that do not use these
verbs.

Posix up to the 2008 revision requires the argument of the @option{-e}
option to be a syntactically complete script.  GNU @command{sed} allows
passing multiple script fragments, each as argument of a separate
@option{-e} option, that are then combined, with newlines between the
fragments, and a future Posix revision may allow this as well.
This approach is not portable with script fragments ending in backslash; for example, the @command{sed} programs on Solaris 10, HP-UX 11, and AIX don't allow splitting in this case: @example $ @kbd{echo a | sed -n -e 'i\} @kbd{0'} 0 $ @kbd{echo a | sed -n -e 'i\' -e 0} Unrecognized command: 0 @end example @noindent In practice, however, this technique of joining fragments through @option{-e} works for multiple @command{sed} functions within @samp{@{} and @samp{@}}, even if that is not specified by Posix: @example @c The quote around the closing brace silences interactive zsh. $ @kbd{echo a | sed -n -e '/a/@{' -e s/a/b/ -e p -e '@}'} b @end example Commands inside @{ @} brackets are further restricted. Posix 2008 says that they cannot be preceded by addresses, @samp{!}, or @samp{;}, and that each command must be followed immediately by a newline, without any intervening blanks or semicolons. The closing bracket must be alone on a line, other than white space preceding or following it. However, a future version of Posix may standardize the use of addresses within brackets. Contrary to yet another urban legend, you may portably use @samp{&} in the replacement part of the @code{s} command to mean ``what was matched''. All descendants of Unix version 7 @command{sed} (at least; we don't have first hand experience with older @command{sed} implementations) have supported it. Posix requires that you must not have any white space between @samp{!} and the following command. It is OK to have blanks between the address and the @samp{!}. For instance, on Solaris: @example $ @kbd{echo "foo" | sed -n '/bar/ ! p'} @error{}Unrecognized command: /bar/ ! p $ @kbd{echo "foo" | sed -n '/bar/! p'} @error{}Unrecognized command: /bar/! p $ @kbd{echo "foo" | sed -n '/bar/ !p'} foo @end example Posix also says that you should not combine @samp{!} and @samp{;}. If you use @samp{!}, it is best to put it on a command that is delimited by newlines rather than @samp{;}. 
Also note that Posix requires that the @samp{b}, @samp{t}, @samp{r}, and
@samp{w} commands be followed by exactly one space before their
argument.  On the other hand, no white space is allowed between
@samp{:} and the subsequent label name.

If a sed script is specified on the command line and ends in an
@samp{a}, @samp{c}, or @samp{i} command, the last line of inserted text
should be followed by a newline.  Otherwise some @command{sed}
implementations (e.g., OpenBSD 3.9) do not append a newline to the
inserted text.

Many @command{sed} implementations (e.g., MacOS X 10.4, OpenBSD 3.9,
Solaris 10 @command{/usr/ucb/sed}) strip leading white space from the
text of @samp{a}, @samp{c}, and @samp{i} commands.  Prepend a backslash
to work around this incompatibility with Posix:

@example
$ @kbd{echo flushleft | sed 'a\}
> @kbd{   indented}
> @kbd{'}
flushleft
indented
$ @kbd{echo foo | sed 'a\}
> @kbd{\   indented}
> @kbd{'}
foo
   indented
@end example

Posix requires that with an empty regular expression, the last non-empty
regular expression from either an address specification or substitution
command is applied.  However, busybox 1.6.1 complains when using a
substitution command with a replacement containing a back-reference to
an empty regular expression; the workaround is repeating the regular
expression.

@example
$ @kbd{echo abc | busybox sed '/a\(b\)c/ s//\1/'}
sed: No previous regexp.
$ @kbd{echo abc | busybox sed '/a\(b\)c/ s/a\(b\)c/\1/'}
b
@end example

Portable scripts should be aware of the inconsistencies and options for
handling word boundaries, as these are not specified by POSIX.

@example
              \<      \b     [[:<:]]
Solaris 10    yes     no     no
Solaris XPG4  yes     no     error
NetBSD 5.1    no      no     yes
FreeBSD 9.1   no      no     yes
GNU           yes     yes    error
busybox       yes     yes    error
@end example


@item @command{sed} (@samp{t})
@c ---------------------------
@prindex @command{sed} (@samp{t})
Some old systems have @command{sed} that ``forget'' to reset their
@samp{t} flag when starting a new cycle.
For instance on MIPS RISC/OS, and on IRIX 5.3, if you run the following @command{sed} script (the line numbers are not actual part of the texts): @example s/keep me/kept/g # a t end # b s/.*/deleted/g # c :end # d @end example @noindent on @example delete me # 1 delete me # 2 keep me # 3 delete me # 4 @end example @noindent you get @example deleted delete me kept deleted @end example @noindent instead of @example deleted deleted kept deleted @end example Why? When processing line 1, (c) matches, therefore sets the @samp{t} flag, and the output is produced. When processing line 2, the @samp{t} flag is still set (this is the bug). Command (a) fails to match, but @command{sed} is not supposed to clear the @samp{t} flag when a substitution fails. Command (b) sees that the flag is set, therefore it clears it, and jumps to (d), hence you get @samp{delete me} instead of @samp{deleted}. When processing line (3), @samp{t} is clear, (a) matches, so the flag is set, hence (b) clears the flags and jumps. Finally, since the flag is clear, line 4 is processed properly. There are two things one should remember about @samp{t} in @command{sed}. Firstly, always remember that @samp{t} jumps if @emph{some} substitution succeeded, not only the immediately preceding substitution. Therefore, always use a fake @samp{t clear} followed by a @samp{:clear} on the next line, to reset the @samp{t} flag where needed. Secondly, you cannot rely on @command{sed} to clear the flag at each new cycle. One portable implementation of the script above is: @example t clear :clear s/keep me/kept/g t end s/.*/deleted/g :end @end example @item @command{sleep} @c ------------------ @prindex @command{sleep} Using @command{sleep} is generally portable. However, remember that adding a @command{sleep} to work around timestamp issues, with a minimum granularity of one second, doesn't scale well for parallel builds on modern machines with sub-second process completion. 
@item @command{sort} @c ----------------- @prindex @command{sort} Remember that sort order is influenced by the current locale. Inside @file{configure}, the C locale is in effect, but in Makefile snippets, you may need to specify @code{LC_ALL=C sort}. @item @command{tar} @c ---------------- @prindex @command{tar} There are multiple file formats for @command{tar}; if you use Automake, the macro @code{AM_INIT_AUTOMAKE} has some options controlling which level of portability to use. @anchor{touch} @item @command{touch} @c ------------------ @prindex @command{touch} @cindex timestamp resolution If you specify the desired timestamp (e.g., with the @option{-r} option), older @command{touch} implementations use the @code{utime} or @code{utimes} system call, which can result in the same kind of timestamp truncation problems that @samp{cp -p} has. On ancient BSD systems, @command{touch} or any command that results in an empty file does not update the timestamps, so use a command like @command{echo} as a workaround. Also, GNU @command{touch} 3.16r (and presumably all before that) fails to work on SunOS 4.1.3 when the empty file is on an NFS-mounted 4.2 volume. However, these problems are no longer of practical concern. @item @command{tr} @c --------------- @prindex @command{tr} @cindex carriage return, deleting @cindex newline, deleting @cindex deleting carriage return Not all versions of @command{tr} handle all backslash character escapes. For example, Solaris 10 @command{/usr/ucb/tr} falls over, even though Solaris contains more modern @command{tr} in other locations. Using octal escapes is more portable for carriage returns, since @samp{\015} is the same for both ASCII and EBCDIC, and since use of literal carriage returns in scripts causes a number of other problems. But for other characters, like newline, using octal escapes ties the operation to ASCII, so it is better to use literal characters. 
@example $ @kbd{@{ echo moon; echo light; @} | /usr/ucb/tr -d '\n' ; echo} moo light $ @kbd{@{ echo moon; echo light; @} | /usr/bin/tr -d '\n' ; echo} moonlight $ @kbd{@{ echo moon; echo light; @} | /usr/ucb/tr -d '\012' ; echo} moonlight $ @kbd{nl='} @kbd{'; @{ echo moon; echo light; @} | /usr/ucb/tr -d "$nl" ; echo} moonlight @end example Not all versions of @command{tr} recognize direct ranges of characters: at least Solaris @command{/usr/bin/tr} still fails to do so. But you can use @command{/usr/xpg4/bin/tr} instead, or add brackets (which in Posix transliterate to themselves). @example $ @kbd{echo "Hazy Fantazy" | LC_ALL=C /usr/bin/tr a-z A-Z} HAZy FAntAZy $ @kbd{echo "Hazy Fantazy" | LC_ALL=C /usr/bin/tr '[a-z]' '[A-Z]'} HAZY FANTAZY $ @kbd{echo "Hazy Fantazy" | LC_ALL=C /usr/xpg4/bin/tr a-z A-Z} HAZY FANTAZY @end example When providing two arguments, be sure the second string is at least as long as the first. @example $ @kbd{echo abc | /usr/xpg4/bin/tr bc d} adc $ @kbd{echo abc | coreutils/tr bc d} add @end example Posix requires @command{tr} to operate on binary files. But at least Solaris @command{/usr/ucb/tr} and @command{/usr/bin/tr} silently discard @code{NUL} in the input prior to doing any translation. When using @command{tr} to process a binary file that may contain @code{NUL} bytes, it is necessary to use @command{/usr/xpg4/bin/tr} instead, or @command{/usr/xpg6/bin/tr} if that is available. @example $ @kbd{printf 'a\0b' | /usr/ucb/tr x x | od -An -tx1} 61 62 $ @kbd{printf 'a\0b' | /usr/bin/tr x x | od -An -tx1} 61 62 $ @kbd{printf 'a\0b' | /usr/xpg4/bin/tr x x | od -An -tx1} 61 00 62 @end example Solaris @command{/usr/ucb/tr} additionally fails to handle @samp{\0} as the octal escape for @code{NUL}. 
@example $ @kbd{printf 'abc' | /usr/ucb/tr 'bc' '\0d' | od -An -tx1} 61 62 63 $ @kbd{printf 'abc' | /usr/bin/tr 'bc' '\0d' | od -An -tx1} 61 00 64 $ @kbd{printf 'abc' | /usr/xpg4/bin/tr 'bc' '\0d' | od -An -tx1} 61 00 64 @end example @end table @node Portable Make @chapter Portable Make Programming @prindex @command{make} @cindex Limitations of @command{make} Writing portable makefiles is an art. Since a makefile's commands are executed by the shell, you must consider the shell portability issues already mentioned. However, other issues are specific to @command{make} itself. @menu * $< in Ordinary Make Rules:: $< in ordinary rules * Failure in Make Rules:: Failing portably in rules * Special Chars in Names:: Special Characters in Macro Names * Backslash-Newline-Empty:: Empty lines after backslash-newline * Backslash-Newline Comments:: Spanning comments across line boundaries * Long Lines in Makefiles:: Line length limitations * Macros and Submakes:: @code{make macro=value} and submakes * The Make Macro MAKEFLAGS:: @code{$(MAKEFLAGS)} portability issues * The Make Macro SHELL:: @code{$(SHELL)} portability issues * Parallel Make:: Parallel @command{make} quirks * Comments in Make Rules:: Other problems with Make comments * Newlines in Make Rules:: Using literal newlines in rules * Comments in Make Macros:: Other problems with Make comments in macros * Trailing whitespace in Make Macros:: Macro substitution problems * Command-line Macros and whitespace:: Whitespace trimming of values * obj/ and Make:: Don't name a subdirectory @file{obj} * make -k Status:: Exit status of @samp{make -k} * VPATH and Make:: @code{VPATH} woes * Single Suffix Rules:: Single suffix rules and separated dependencies * Timestamps and Make:: Sub-second timestamp resolution @end menu @node $< in Ordinary Make Rules @section @code{$<} in Ordinary Make Rules Posix says that the @samp{$<} construct in makefiles can be used only in inference rules and in the @samp{.DEFAULT} rule; its meaning in 
ordinary rules is unspecified. Solaris @command{make} for instance replaces it with the empty string. OpenBSD (3.0 and later) @command{make} diagnoses these uses and errors out. @node Failure in Make Rules @section Failure in Make Rules Posix 2008 requires that @command{make} must invoke each command with the equivalent of a @samp{sh -e -c} subshell, which causes the subshell to exit immediately if a subsidiary simple-command fails, although not all @command{make} implementations have historically followed this rule. For example, the command @samp{touch T; rm -f U} may attempt to remove @file{U} even if the @command{touch} fails, although this is not permitted with Posix make. One way to work around failures in simple commands is to reword them so that they always succeed, e.g., @samp{touch T || :; rm -f U}. However, even this approach can run into common bugs in BSD implementations of the @option{-e} option of @command{sh} and @command{set} (@pxref{set, , Limitations of Shell Builtins}), so if you are worried about porting to buggy BSD shells it may be simpler to migrate complicated @command{make} actions into separate scripts. @node Special Chars in Names @section Special Characters in Make Macro Names Posix limits macro names to nonempty strings containing only ASCII letters and digits, @samp{.}, and @samp{_}. Many @command{make} implementations allow a wider variety of characters, but portable makefiles should avoid them. It is portable to start a name with a special character, e.g., @samp{$(.FOO)}. Some ancient @command{make} implementations don't support leading underscores in macro names. An example is NEWS-OS 4.2R. @example $ @kbd{cat Makefile} _am_include = # _am_quote = all:; @@echo this is test $ @kbd{make} Make: Must be a separator on rules line 2. Stop. $ @kbd{cat Makefile2} am_include = # am_quote = all:; @@echo this is test $ @kbd{make -f Makefile2} this is test @end example @noindent However, this problem is no longer of practical concern. 
@node Backslash-Newline-Empty
@section Backslash-Newline Before Empty Lines

A bug in Bash 2.03 can cause problems if a Make rule contains a
backslash-newline followed by a line that expands to nothing.
For example, on Solaris 8:

@example
SHELL = /bin/bash
EMPTY =
foo:
	touch foo \
	  $(EMPTY)
@end example

@noindent
executes

@example
/bin/bash -c 'touch foo \
'
@end example

@noindent
which fails with a syntax error, due to the Bash bug.  To avoid this
problem, avoid nullable macros in the last line of a multi-line command.

@c This has been seen on ia64 hpux 11.20, and on one hppa hpux 10.20,
@c but another hppa hpux 10.20 didn't have it.  Bob Proulx
@c <bob@proulx.com> thinks it was in hpux 8.0 too.
On some versions of HP-UX, @command{make} reads multiple newlines
following a backslash, continuing to the next non-empty line.  For
example,

@example
FOO = one \

BAR = two

test:
	: FOO is "$(FOO)"
	: BAR is "$(BAR)"
@end example

@noindent
shows @code{FOO} equal to @code{one BAR = two}.  Other implementations
sensibly let a backslash continue only to the immediately following
line.

@node Backslash-Newline Comments
@section Backslash-Newline in Make Comments

According to Posix, Make comments start with @code{#}
and continue until an unescaped newline is reached.

@example
$ @kbd{cat Makefile}
# A = foo \
      bar \
      baz

all:
	@@echo ok
$ @kbd{make}   # GNU make
ok
@end example

@noindent
However this is not always the case.  Some implementations
discard everything from @code{#} through the end of the line, ignoring any
trailing backslash.

@example
$ @kbd{pmake}  # BSD make
"Makefile", line 3: Need an operator
Fatal errors encountered -- cannot continue
@end example

@noindent
Therefore, if you want to comment out a multi-line definition, prefix each
line with @code{#}, not only the first.
@example # A = foo \ # bar \ # baz @end example @node Long Lines in Makefiles @section Long Lines in Makefiles Tru64 5.1's @command{make} has been reported to crash when given a makefile with lines longer than around 20 kB. Earlier versions are reported to exit with @code{Line too long} diagnostics. @node Macros and Submakes @section @code{make macro=value} and Submakes A command-line variable definition such as @code{foo=bar} overrides any definition of @code{foo} in a makefile. Some @command{make} implementations (such as GNU @command{make}) propagate this override to subsidiary invocations of @command{make}. Some other implementations do not pass the substitution along to submakes. @example $ @kbd{cat Makefile} foo = foo one: @@echo $(foo) $(MAKE) two two: @@echo $(foo) $ @kbd{make foo=bar} # GNU make 3.79.1 bar make two make[1]: Entering directory `/home/adl' bar make[1]: Leaving directory `/home/adl' $ @kbd{pmake foo=bar} # BSD make bar pmake two foo @end example You have a few possibilities if you do want the @code{foo=bar} override to propagate to submakes. One is to use the @option{-e} option, which causes all environment variables to have precedence over the makefile macro definitions, and declare foo as an environment variable: @example $ @kbd{env foo=bar make -e} @end example The @option{-e} option is propagated to submakes automatically, and since the environment is inherited between @command{make} invocations, the @code{foo} macro is overridden in submakes as expected. This syntax (@code{foo=bar make -e}) is portable only when used outside of a makefile, for instance from a script or from the command line. When run inside a @command{make} rule, GNU @command{make} 3.80 and prior versions forget to propagate the @option{-e} option to submakes. Moreover, using @option{-e} could have unexpected side effects if your environment contains some other macros usually defined by the makefile. (See also the note about @code{make -e} and @code{SHELL} below.) 
If you can foresee all macros that a user might want to override, then you can propagate them to submakes manually, from your makefile: @example foo = foo one: @@echo $(foo) $(MAKE) foo=$(foo) two two: @@echo $(foo) @end example Another way to propagate a variable to submakes in a portable way is to expand an extra variable in every invocation of @samp{$(MAKE)} within your makefile: @example foo = foo one: @@echo $(foo) $(MAKE) $(SUBMAKEFLAGS) two two: @@echo $(foo) @end example Users must be aware that this technique is in use to take advantage of it, e.g.@: with @code{make foo=bar SUBMAKEFLAGS='foo=bar'}, but it allows any macro to be overridden. Makefiles generated by @command{automake} use this technique, expanding @code{$(AM_MAKEFLAGS)} on the command lines of submakes (@pxref{Subdirectories, , Automake, automake, GNU Automake}). @node The Make Macro MAKEFLAGS @section The Make Macro MAKEFLAGS @cindex @code{MAKEFLAGS} and @command{make} @cindex @command{make} and @code{MAKEFLAGS} Posix requires @command{make} to use @code{MAKEFLAGS} to affect the current and recursive invocations of make, but allows implementations several formats for the variable. It is tricky to parse @code{$MAKEFLAGS} to determine whether @option{-s} for silent execution or @option{-k} for continued execution are in effect. For example, you cannot assume that the first space-separated word in @code{$MAKEFLAGS} contains single-letter options, since in the Cygwin version of GNU @command{make} it is either @option{--unix} or @option{--win32} with the second word containing single-letter options. 
@example $ @kbd{cat Makefile} all: @@echo MAKEFLAGS = $(MAKEFLAGS) $ @kbd{make} MAKEFLAGS = --unix $ @kbd{make -k} MAKEFLAGS = --unix -k @end example @node The Make Macro SHELL @section The Make Macro @code{SHELL} @cindex @code{SHELL} and @command{make} @cindex @command{make} and @code{SHELL} Posix-compliant @command{make} internally uses the @code{$(SHELL)} macro to spawn shell processes and execute Make rules. This is a builtin macro supplied by @command{make}, but it can be modified by a makefile or by a command-line argument. Not all @command{make} implementations define this @code{SHELL} macro. Tru64 @command{make} is an example; this implementation always uses @code{/bin/sh}. So it's a good idea to always define @code{SHELL} in your makefiles. If you use Autoconf, do @example SHELL = @@SHELL@@ @end example @noindent If you use Automake, this is done for you. Do not force @code{SHELL = /bin/sh} because that is not correct everywhere. Remember, @file{/bin/sh} is not Posix compliant on many systems, such as FreeBSD 4, NetBSD 3, AIX 3, Solaris 10, or Tru64. Additionally, DJGPP lacks @code{/bin/sh}, and when its GNU @command{make} port sees such a setting it enters a special emulation mode where features like pipes and redirections are emulated on top of DOS's @command{command.com}. Unfortunately this emulation is incomplete; for instance it does not handle command substitutions. Using @code{@@SHELL@@} means that your makefile will benefit from the same improved shell, such as @command{bash} or @command{ksh}, that was discovered during @command{configure}, so that you aren't fighting two different sets of shell bugs between the two contexts. Posix-compliant @command{make} should never acquire the value of $(SHELL) from the environment, even when @code{make -e} is used (otherwise, think about what would happen to your rules if @code{SHELL=/bin/tcsh}). However not all @command{make} implementations have this exception. 
For instance it's not surprising that Tru64 @command{make} doesn't protect @code{SHELL}, since it doesn't use it. @example $ @kbd{cat Makefile} SHELL = /bin/sh FOO = foo all: @@echo $(SHELL) @@echo $(FOO) $ @kbd{env SHELL=/bin/tcsh FOO=bar make -e} # Tru64 Make /bin/tcsh bar $ @kbd{env SHELL=/bin/tcsh FOO=bar gmake -e} # GNU make /bin/sh bar @end example Conversely, @command{make} is not supposed to export any changes to the macro @code{SHELL} to child processes. Again, many implementations break this rule: @example $ @kbd{cat Makefile} all: @@echo $(SHELL) @@printenv SHELL $ @kbd{env SHELL=sh make -e SHELL=/bin/ksh} # BSD Make, GNU make 3.80 /bin/ksh /bin/ksh $ @kbd{env SHELL=sh gmake -e SHELL=/bin/ksh} # GNU make 3.81 /bin/ksh sh @end example @node Parallel Make @section Parallel Make @cindex Parallel @command{make} Support for parallel execution in @command{make} implementation varies. Generally, using GNU make is your best bet. When NetBSD or FreeBSD @command{make} are run in parallel mode, they will reuse the same shell for multiple commands within one recipe. This can have various unexpected consequences. For example, changes of directories or variables persist between recipes, so that: @example all: @@var=value; cd /; pwd; echo $$var; echo $$$$ @@pwd; echo $$var; echo $$$$ @end example @noindent may output the following with @code{make -j1}, at least on NetBSD up to 5.1 and FreeBSD up to 8.2: @example / value 32235 / value 32235 @end example @noindent while without @option{-j1}, or with @option{-B}, the output looks less surprising: @example / value 32238 /tmp 32239 @end example @noindent Another consequence is that, if one command in a recipe uses @code{exit 0} to indicate a successful exit, the shell will be gone and the remaining commands of this recipe will not be executed. 
The BSD @command{make} implementations, when run in parallel mode, will also pass the @command{Makefile} recipes to the shell through its standard input, thus making it unusable from the recipes: @example $ @kbd{cat Makefile} read: @@read line; echo LINE: $$line @c $$ @c restore font-lock $ @kbd{echo foo | make read} LINE: foo $ @kbd{echo foo | make -j1 read} # NetBSD 5.1 and FreeBSD 8.2 LINE: @end example @noindent Moreover, when FreeBSD @command{make} (up at least to 8.2) is run in parallel mode, it implements the @code{@@} and @code{-} ``recipe modifiers'' by dynamically modifying the active shell flags. This behavior has the effects of potentially clobbering the exit status of recipes silenced with the @code{@@} modifier if they also unset the @option{errexit} shell flag, and of mangling the output in unexpected ways: @example $ @kbd{cat Makefile} a: @@echo $$-; set +e; false b: -echo $$-; false; echo set - $ @kbd{make a; echo status: $?} ehBc *** Error code 1 status: 1 $ @kbd{make -j1 a; echo status: $?} ehB status: 0 $ @kbd{make b} echo $-; echo set - hBc set - $ @kbd{make -j1 b} echo $-; echo hvB @end example @noindent You can avoid all these issues by using the @option{-B} option to enable compatibility semantics. However, that will effectively also disable all parallelism as that will cause prerequisites to be updated in the order they are listed in a rule. Some make implementations (among them, FreeBSD @command{make}, NetBSD @command{make}, and Solaris @command{dmake}), when invoked with a @option{-j@var{N}} option, connect the standard output and standard error of all their child processes to pipes or temporary regular files. This can lead to subtly different semantics in the behavior of the spawned processes. 
For example, even if the @command{make} standard output is connected to a tty, the recipe command will not be: @example $ @kbd{cat Makefile} all: @@test -t 1 && echo "Is a tty" || echo "Is not a tty" $ @kbd{make -j 2} # FreeBSD 8.2 make Is not a tty $ @kbd{make -j 2} # NetBSD 5.1 make --- all --- Is not a tty $ @kbd{dmake -j 2} # Solaris 10 dmake @var{hostname} --> 1 job @var{hostname} --> Job output Is not a tty @end example @noindent On the other hand: @example $ @kbd{make -j 2} # GNU make, Heirloom make Is a tty @end example @noindent The above examples also show additional status output produced in parallel mode for targets being updated by Solaris @command{dmake} and NetBSD @command{make} (but @emph{not} by FreeBSD @command{make}). Furthermore, parallel runs of those @command{make} implementations will route standard error from commands that they spawn into their own standard output, and may remove leading whitespace from output lines. @node Comments in Make Rules @section Comments in Make Rules @cindex Comments in @file{Makefile} rules @cindex @file{Makefile} rules and comments Never put comments in a rule. Some @command{make} treat anything starting with a tab as a command for the current rule, even if the tab is immediately followed by a @code{#}. The @command{make} from Tru64 Unix V5.1 is one of them. The following makefile runs @code{# foo} through the shell. @example all: # foo @end example As a workaround, you can use the @command{:} no-op command with a string argument that gets ignored: @example all: : "foo" @end example Conversely, if you want to use the @samp{#} character in some command, you can only do so by expanding it inside a rule (@pxref{Comments in Make Macros}). 
So for example, if @samp{COMMENT_CHAR} is substituted by @command{config.status} as @samp{#}, then the following substitutes @samp{@@COMMENT_CHAR@@} in a generated header: @example foo.h: foo.h.in sed -e 's|@@''COMMENT_CHAR''@@|@@COMMENT_CHAR@@|g' \ $(srcdir)/foo.h.in > $@@ @end example The funny shell quoting avoids a substitution at @command{config.status} run time of the left-hand side of the @command{sed} @samp{s} command. @node Newlines in Make Rules @section Newlines in Make Rules @cindex Newlines in @file{Makefile} rules @cindex @file{Makefile} rules and newlines In shell scripts, newlines can be used inside string literals. But in the shell statements of @file{Makefile} rules, this is not possible: A newline not preceded by a backslash is a separator between shell statements. Whereas a newline that is preceded by a backslash becomes part of the shell statement according to POSIX, but gets replaced, together with the backslash that precedes it, by a space in GNU @command{make} 3.80 and older. So, how can a newline be used in a string literal? The trick is to set up a shell variable that contains a newline: @example nlinit=`echo 'nl="'; echo '"'`; eval "$$nlinit" @end example For example, in order to create a multi-line @samp{sed} expression that inserts a blank line after every line of a file, this code can be used: @example nlinit=`echo 'nl="'; echo '"'`; eval "$$nlinit"; \ sed -e "s/\$$/\\$$@{nl@}/" < input > output @end example @node Comments in Make Macros @section Comments in Make Macros @cindex Comments in @file{Makefile} macros @cindex @file{Makefile} macros and comments Avoid putting comments in macro values as far as possible. Posix specifies that the text starting from the @samp{#} sign until the end of the line is to be ignored, which has the unfortunate effect of disallowing them even within quotes. 
Thus, the following might lead to a syntax error at compile time: @example CPPFLAGS = "-DCOMMENT_CHAR='#'" @end example @noindent as @samp{CPPFLAGS} may be expanded to @samp{"-DCOMMENT_CHAR='}. Most @command{make} implementations disregard this and treat single and double quotes specially here. Also, GNU @command{make} lets you put @samp{#} into a macro value by escaping it with a backslash, i.e., @samp{\#}. However, neither of these usages are portable. @xref{Comments in Make Rules}, for a portable alternative. Even without quoting involved, comments can have surprising effects, because the whitespace before them is part of the variable value: @example foo = bar # trailing comment print: ; @@echo "$(foo)." @end example @noindent prints @samp{bar .}, which is usually not intended, and can expose @command{make} bugs as described below. @node Trailing whitespace in Make Macros @section Trailing whitespace in Make Macros @cindex whitespace in @file{Makefile} macros @cindex @file{Makefile} macros and whitespace GNU @command{make} 3.80 mistreats trailing whitespace in macro substitutions and appends another spurious suffix: @example empty = foo = bar $(empty) print: ; @@echo $(foo:=.test) @end example @noindent prints @samp{bar.test .test}. BSD and Solaris @command{make} implementations do not honor trailing whitespace in macro definitions as Posix requires: @example foo = bar # Note the space after "bar". print: ; @@echo $(foo)t @end example @noindent prints @samp{bart} instead of @samp{bar t}. To work around this, you can use a helper macro as in the previous example. @node Command-line Macros and whitespace @section Command-line Macros and whitespace @cindex whitespace in command-line macros @cindex command-line, macros set on @cindex environment, macros set from Some @command{make} implementations may strip trailing whitespace off of macros set on the command line in addition to leading whitespace. 
Further, some may strip leading whitespace off of macros set from environment variables: @example $ @kbd{echo 'print: ; @@echo "x$(foo)x$(bar)x"' | foo=' f f ' make -f - bar=' b b '} x f f xb b x # AIX, BSD, GNU make xf f xb b x # HP-UX, IRIX, Tru64/OSF make x f f xb bx # Solaris make @end example @node obj/ and Make @section The @file{obj/} Subdirectory and Make @cindex @file{obj/}, subdirectory @cindex BSD @command{make} and @file{obj/} Never name one of your subdirectories @file{obj/} if you don't like surprises. If an @file{obj/} directory exists, BSD @command{make} enters it before reading the makefile. Hence the makefile in the current directory is not read. @example $ @kbd{cat Makefile} all: echo Hello $ @kbd{cat obj/Makefile} all: echo World $ @kbd{make} # GNU make echo Hello Hello $ @kbd{pmake} # BSD make echo World World @end example @node make -k Status @section Exit Status of @code{make -k} @cindex @code{make -k} Do not rely on the exit status of @code{make -k}. Some implementations reflect whether they encountered an error in their exit status; other implementations always succeed. @example $ @kbd{cat Makefile} all: false $ @kbd{make -k; echo exit status: $?} # GNU make false make: *** [all] Error 1 exit status: 2 $ @kbd{pmake -k; echo exit status: $?} # BSD make false *** Error code 1 (continuing) exit status: 0 @end example @node VPATH and Make @section @code{VPATH} and Make @cindex @code{VPATH} Posix does not specify the semantics of @code{VPATH}. Typically, @command{make} supports @code{VPATH}, but its implementation is not consistent. Autoconf and Automake support makefiles whose usages of @code{VPATH} are portable to recent-enough popular implementations of @command{make}, but to keep the resulting makefiles portable, a package's makefile prototypes must take the following issues into account. These issues are complicated and are often poorly understood, and installers who use @code{VPATH} should expect to find many bugs in this area. 
If you use @code{VPATH}, the simplest way to avoid these portability bugs is to stick with GNU @command{make}, since it is the most commonly-used @command{make} among Autoconf users. Here are some known issues with some @code{VPATH} implementations. @menu * Variables listed in VPATH:: @code{VPATH} must be literal on ancient hosts * VPATH and Double-colon:: Problems with @samp{::} on ancient hosts * $< in Explicit Rules:: @code{$<} does not work in ordinary rules * Automatic Rule Rewriting:: @code{VPATH} goes wild on Solaris * Tru64 Directory Magic:: @command{mkdir} goes wild on Tru64 * Make Target Lookup:: More details about @code{VPATH} lookup @end menu @node Variables listed in VPATH @subsection Variables listed in @code{VPATH} @cindex @code{VPATH} and variables @cindex variables and @code{VPATH} Do not set @code{VPATH} to the value of another variable, for example @samp{VPATH = $(srcdir)}, because some ancient versions of @command{make} do not do variable substitutions on the value of @code{VPATH}. For example, use this @example srcdir = @@srcdir@@ VPATH = @@srcdir@@ @end example @noindent rather than @samp{VPATH = $(srcdir)}. Note that with GNU Automake, there is no need to set this yourself. @node VPATH and Double-colon @subsection @code{VPATH} and Double-colon Rules @cindex @code{VPATH} and double-colon rules @cindex double-colon rules and @code{VPATH} With ancient versions of Sun @command{make}, any assignment to @code{VPATH} causes @command{make} to execute only the first set of double-colon rules. However, this problem is no longer of practical concern. @node $< in Explicit Rules @subsection @code{$<} Not Supported in Explicit Rules @cindex explicit rules, @code{$<}, and @code{VPATH} @cindex @code{$<}, explicit rules, and @code{VPATH} @cindex @code{VPATH}, explicit rules, and @code{$<} Using @code{$<} in explicit rules is not portable. The prerequisite file must be named explicitly in the rule. 
If you want to find the prerequisite via a @code{VPATH} search, you have to code the whole thing manually. @xref{Build Directories}. @node Automatic Rule Rewriting @subsection Automatic Rule Rewriting @cindex @code{VPATH} and automatic rule rewriting @cindex automatic rule rewriting and @code{VPATH} Some @command{make} implementations, such as Solaris and Tru64, search for prerequisites in @code{VPATH} and then rewrite each occurrence as a plain word in the rule. For instance: @example # This isn't portable to GNU make. VPATH = ../pkg/src f.c: if.c cp if.c f.c @end example @noindent executes @code{cp ../pkg/src/if.c f.c} if @file{if.c} is found in @file{../pkg/src}. However, this rule leads to real problems in practice. For example, if the source directory contains an ordinary file named @file{test} that is used in a dependency, Solaris @command{make} rewrites commands like @samp{if test -r foo; @dots{}} to @samp{if ../pkg/src/test -r foo; @dots{}}, which is typically undesirable. In fact, @command{make} is completely unaware of shell syntax used in the rules, so the VPATH rewrite can potentially apply to @emph{any} whitespace-separated word in a rule, including shell variables, functions, and keywords. @example $ @kbd{mkdir build} $ @kbd{cd build} $ @kbd{cat > Makefile <<'END'} VPATH = .. all: arg func for echo func () @{ for arg in "$$@@"; do echo $$arg; done; @}; \ func "hello world" END $ @kbd{touch ../arg ../func ../for ../echo} $ @kbd{make} ../func () @{ ../for ../arg in "$@@"; do ../echo $arg; done; @}; \ ../func "hello world" sh: syntax error at line 1: `do' unexpected *** Error code 2 @end example @noindent To avoid this problem, portable makefiles should never mention a source file or dependency whose name is that of a shell keyword like @file{for} or @file{until}, a shell command like @command{cat} or @command{gcc} or @command{test}, or a shell function or variable used in the corresponding @command{Makefile} recipe. 
Because of these problems GNU @command{make} and many other @command{make} implementations do not rewrite commands, so portable makefiles should search @code{VPATH} manually. It is tempting to write this: @smallexample # This isn't portable to Solaris make. VPATH = ../pkg/src f.c: if.c cp `test -f if.c || echo $(VPATH)/`if.c f.c @end smallexample @noindent However, the ``prerequisite rewriting'' still applies here. So if @file{if.c} is in @file{../pkg/src}, Solaris and Tru64 @command{make} execute @smallexample cp `test -f ../pkg/src/if.c || echo ../pkg/src/`if.c f.c @end smallexample @noindent which reduces to @example cp if.c f.c @end example @noindent and thus fails. Oops. A simple workaround, and good practice anyway, is to use @samp{$?} and @samp{$@@} when possible: @smallexample VPATH = ../pkg/src f.c: if.c cp $? $@@ @end smallexample @noindent but this does not generalize well to commands with multiple prerequisites. A more general workaround is to rewrite the rule so that the prerequisite @file{if.c} never appears as a plain word. For example, these three rules would be safe, assuming @file{if.c} is in @file{../pkg/src} and the other files are in the working directory: @smallexample VPATH = ../pkg/src f.c: if.c f1.c cat `test -f ./if.c || echo $(VPATH)/`if.c f1.c >$@@ g.c: if.c g1.c cat `test -f 'if.c' || echo $(VPATH)/`if.c g1.c >$@@ h.c: if.c h1.c cat `test -f "if.c" || echo $(VPATH)/`if.c h1.c >$@@ @end smallexample Things get worse when your prerequisites are in a macro. @example VPATH = ../pkg/src HEADERS = f.h g.h h.h install-HEADERS: $(HEADERS) for i in $(HEADERS); do \ $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ @c $$ restore font-lock done @end example The above @code{install-HEADERS} rule is not Solaris-proof because @code{for i in $(HEADERS);} is expanded to @code{for i in f.h g.h h.h;} where @code{f.h} and @code{g.h} are plain words and are hence subject to @code{VPATH} adjustments. 
If the three files are in @file{../pkg/src}, the rule is run as: @example for i in ../pkg/src/f.h ../pkg/src/g.h h.h; do \ install -m 644 \ `test -f $i || echo ../pkg/src/`$i \ /usr/local/include/$i; \ done @end example where the two first @command{install} calls fail. For instance, consider the @code{f.h} installation: @example install -m 644 \ `test -f ../pkg/src/f.h || \ echo ../pkg/src/ \ `../pkg/src/f.h \ /usr/local/include/../pkg/src/f.h; @end example @noindent It reduces to: @example install -m 644 \ ../pkg/src/f.h \ /usr/local/include/../pkg/src/f.h; @end example Note that the manual @code{VPATH} search did not cause any problems here; however this command installs @file{f.h} in an incorrect directory. Trying to quote @code{$(HEADERS)} in some way, as we did for @code{foo.c} a few makefiles ago, does not help: @example install-HEADERS: $(HEADERS) headers='$(HEADERS)'; \ for i in $$headers; do \ $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ done @end example Now, @code{headers='$(HEADERS)'} macro-expands to: @example headers='f.h g.h h.h' @end example @noindent but @code{g.h} is still a plain word. (As an aside, the idiom @code{headers='$(HEADERS)'; for i in $$headers;} is a good idea if @code{$(HEADERS)} can be empty, because some shells diagnose a syntax error on @code{for i in;}.) One workaround is to strip this unwanted @file{../pkg/src/} prefix manually: @example VPATH = ../pkg/src HEADERS = f.h g.h h.h install-HEADERS: $(HEADERS) headers='$(HEADERS)'; \ for i in $$headers; do \ i=`expr "$$i" : '$(VPATH)/\(.*\)'`; $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ @c $$ restore font-lock done @end example Automake does something similar. However the above hack works only if the files listed in @code{HEADERS} are in the current directory or a subdirectory; they should not be in an enclosing directory. 
If we had @code{HEADERS = ../f.h}, the above fragment would fail in a VPATH build with Tru64 @command{make}. The reason is that not only does Tru64 @command{make} rewrite dependencies, but it also simplifies them. Hence @code{../f.h} becomes @code{../pkg/f.h} instead of @code{../pkg/src/../f.h}. This obviously defeats any attempt to strip a leading @file{../pkg/src/} component. The following example makes the behavior of Tru64 @command{make} more apparent. @example $ @kbd{cat Makefile} VPATH = sub all: ../foo echo ../foo $ @kbd{ls} Makefile foo $ @kbd{make} echo foo foo @end example @noindent Dependency @file{../foo} was found in @file{sub/../foo}, but Tru64 @command{make} simplified it as @file{foo}. (Note that the @file{sub/} directory does not even exist, this just means that the simplification occurred before the file was checked for.) For the record here is how SunOS 4 @command{make} behaves on this example. @smallexample $ @kbd{make} make: Fatal error: Don't know how to make target `../foo' $ @kbd{mkdir sub} $ @kbd{make} echo sub/../foo sub/../foo @end smallexample @node Tru64 Directory Magic @subsection Tru64 @command{make} Creates Prerequisite Directories Magically @cindex @code{VPATH} and prerequisite directories @cindex prerequisite directories and @code{VPATH} When a prerequisite is a subdirectory of @code{VPATH}, Tru64 @command{make} creates it in the current directory. @example $ @kbd{mkdir -p foo/bar build} $ @kbd{cd build} $ @kbd{cat >Makefile <<END VPATH = .. all: foo/bar END} $ @kbd{make} mkdir foo mkdir foo/bar @end example This can yield unexpected results if a rule uses a manual @code{VPATH} search as presented before. @example VPATH = .. all : foo/bar command `test -d foo/bar || echo ../`foo/bar @end example The above @command{command} is run on the empty @file{foo/bar} directory that was created in the current directory. 
@node Make Target Lookup @subsection Make Target Lookup @cindex @code{VPATH}, resolving target pathnames GNU @command{make} uses a complex algorithm to decide when it should use files found via a @code{VPATH} search. @xref{Search Algorithm, , How Directory Searches are Performed, make, The GNU Make Manual}. If a target needs to be rebuilt, GNU @command{make} discards the file name found during the @code{VPATH} search for this target, and builds the file locally using the file name given in the makefile. If a target does not need to be rebuilt, GNU @command{make} uses the file name found during the @code{VPATH} search. Other @command{make} implementations, like NetBSD @command{make}, are easier to describe: the file name found during the @code{VPATH} search is used whether the target needs to be rebuilt or not. Therefore new files are created locally, but existing files are updated at their @code{VPATH} location. OpenBSD and FreeBSD @command{make}, however, never perform a @code{VPATH} search for a dependency that has an explicit rule. This is extremely annoying. When attempting a @code{VPATH} build for an autoconfiscated package (e.g., @code{mkdir build && cd build && ../configure}), this means GNU @command{make} builds everything locally in the @file{build} directory, while BSD @command{make} builds new files locally and updates existing files in the source directory. @example $ @kbd{cat Makefile} VPATH = .. 
all: foo.x bar.x foo.x bar.x: newer.x @@echo Building $@@ $ @kbd{touch ../bar.x} $ @kbd{touch ../newer.x} $ @kbd{make} # GNU make Building foo.x Building bar.x $ @kbd{pmake} # NetBSD make Building foo.x Building ../bar.x $ @kbd{fmake} # FreeBSD make, OpenBSD make Building foo.x Building bar.x $ @kbd{tmake} # Tru64 make Building foo.x Building bar.x $ @kbd{touch ../bar.x} $ @kbd{make} # GNU make Building foo.x $ @kbd{pmake} # NetBSD make Building foo.x $ @kbd{fmake} # FreeBSD make, OpenBSD make Building foo.x Building bar.x $ @kbd{tmake} # Tru64 make Building foo.x Building bar.x @end example Note how NetBSD @command{make} updates @file{../bar.x} in its VPATH location, and how FreeBSD, OpenBSD, and Tru64 @command{make} always update @file{bar.x}, even when @file{../bar.x} is up to date. Another point worth mentioning is that once GNU @command{make} has decided to ignore a @code{VPATH} file name (e.g., it ignored @file{../bar.x} in the above example) it continues to ignore it when the target occurs as a prerequisite of another rule. The following example shows that GNU @command{make} does not look up @file{bar.x} in @code{VPATH} before performing the @code{.x.y} rule, because it ignored the @code{VPATH} result of @file{bar.x} while running the @code{bar.x: newer.x} rule. @example $ @kbd{cat Makefile} VPATH = .. 
all: bar.y bar.x: newer.x @@echo Building $@@ .SUFFIXES: .x .y .x.y: cp $< $@@ $ @kbd{touch ../bar.x} $ @kbd{touch ../newer.x} $ @kbd{make} # GNU make Building bar.x cp bar.x bar.y cp: cannot stat `bar.x': No such file or directory make: *** [bar.y] Error 1 $ @kbd{pmake} # NetBSD make Building ../bar.x cp ../bar.x bar.y $ @kbd{rm bar.y} $ @kbd{fmake} # FreeBSD make, OpenBSD make echo Building bar.x cp bar.x bar.y cp: cannot stat `bar.x': No such file or directory *** Error code 1 $ @kbd{tmake} # Tru64 make Building bar.x cp: bar.x: No such file or directory *** Exit 1 @end example Note that if you drop away the command from the @code{bar.x: newer.x} rule, GNU @command{make} magically starts to work: it knows that @code{bar.x} hasn't been updated, therefore it doesn't discard the result from @code{VPATH} (@file{../bar.x}) in succeeding uses. Tru64 also works, but FreeBSD and OpenBSD still don't. @example $ @kbd{cat Makefile} VPATH = .. all: bar.y bar.x: newer.x .SUFFIXES: .x .y .x.y: cp $< $@@ $ @kbd{touch ../bar.x} $ @kbd{touch ../newer.x} $ @kbd{make} # GNU make cp ../bar.x bar.y $ @kbd{rm bar.y} $ @kbd{pmake} # NetBSD make cp ../bar.x bar.y $ @kbd{rm bar.y} $ @kbd{fmake} # FreeBSD make, OpenBSD make cp bar.x bar.y cp: cannot stat `bar.x': No such file or directory *** Error code 1 $ @kbd{tmake} # Tru64 make cp ../bar.x bar.y @end example It seems the sole solution that would please every @command{make} implementation is to never rely on @code{VPATH} searches for targets. In other words, @code{VPATH} should be reserved to sources that are not built. @node Single Suffix Rules @section Single Suffix Rules and Separated Dependencies @cindex Single Suffix Inference Rule @cindex Rule, Single Suffix Inference A @dfn{Single Suffix Rule} is basically a usual suffix (inference) rule (@samp{.from.to:}), but which @emph{destination} suffix is empty (@samp{.from:}). 
@cindex Separated Dependencies @dfn{Separated dependencies} simply refers to listing the prerequisite of a target, without defining a rule. Usually one can list on the one hand side, the rules, and on the other hand side, the dependencies. Solaris @command{make} does not support separated dependencies for targets defined by single suffix rules: @example $ @kbd{cat Makefile} .SUFFIXES: .in foo: foo.in .in: cp $< $@@ $ @kbd{touch foo.in} $ @kbd{make} $ @kbd{ls} Makefile foo.in @end example @noindent while GNU Make does: @example $ @kbd{gmake} cp foo.in foo $ @kbd{ls} Makefile foo foo.in @end example Note it works without the @samp{foo: foo.in} dependency. @example $ @kbd{cat Makefile} .SUFFIXES: .in .in: cp $< $@@ $ @kbd{make foo} cp foo.in foo @end example @noindent and it works with double suffix inference rules: @example $ @kbd{cat Makefile} foo.out: foo.in .SUFFIXES: .in .out .in.out: cp $< $@@ $ @kbd{make} cp foo.in foo.out @end example As a result, in such a case, you have to write target rules. @node Timestamps and Make @section Timestamp Resolution and Make @cindex timestamp resolution Traditionally, file timestamps had 1-second resolution, and @command{make} used those timestamps to determine whether one file was newer than the other. However, many modern file systems have timestamps with 1-nanosecond resolution. Some @command{make} implementations look at the entire timestamp; others ignore the fractional part, which can lead to incorrect results. Normally this is not a problem, but in some extreme cases you may need to use tricks like @samp{sleep 1} to work around timestamp truncation bugs. Commands like @samp{cp -p} and @samp{touch -r} typically do not copy file timestamps to their full resolutions (@pxref{touch, , Limitations of Usual Tools}). 
Hence you should be wary of rules like this: @example dest: src cp -p src dest @end example as @file{dest} often appears to be older than @file{src} after the timestamp is truncated, and this can cause @command{make} to do needless rework the next time it is invoked. To work around this problem, you can use a timestamp file, e.g.: @example dest-stamp: src cp -p src dest date >dest-stamp @end example Apart from timestamp resolution, there are also differences in handling equal timestamps. HP-UX @command{make} updates targets if it has the same timestamp as one of its prerequisites, in violation of Posix rules. This can cause spurious rebuilds for repeated runs of @command{make}. This in turn can cause @command{make} to fail if it tries to rebuild generated files in a possibly read-only source tree with tools not present on the end-user machine. Use GNU @command{make} instead. @c ======================================== Portable C and C++ Programming @node Portable C and C++ @chapter Portable C and C++ Programming @cindex Portable C and C++ programming C and C++ programs often use low-level features of the underlying system, and therefore are often more difficult to make portable to other platforms. Several standards have been developed to help make your programs more portable. If you write programs with these standards in mind, you can have greater confidence that your programs work on a wide variety of systems. @ifhtml @uref{https://@/gcc.gnu.org/@/onlinedocs/@/gcc/@/Standards.html, Language Standards Supported by GCC} @end ifhtml @ifnothtml @xref{Standards, , Language Standards Supported by GCC, gcc, Using the GNU Compiler Collection (GCC)}, @end ifnothtml for a list of C-related standards. Many programs also assume the @uref{https://@/en.wikipedia.org/@/wiki/@/POSIX, Posix standard}. Some old code is written to be portable to K&R C, which predates any C standard. 
K&R C compilers are no longer of practical interest, though, and the rest of section assumes at least C89, the first C standard. Program portability is a huge topic, and this section can only briefly introduce common pitfalls. @xref{System Portability, , Portability between System Types, standards, The GNU Coding Standards}, for more information. @menu * Varieties of Unportability:: How to make your programs unportable * Integer Overflow:: When integers get too large * Preprocessor Arithmetic:: @code{#if} expression problems * Null Pointers:: Properties of null pointers * Buffer Overruns:: Subscript errors and the like * Volatile Objects:: @code{volatile} and signals * Floating Point Portability:: Portable floating-point arithmetic * Exiting Portably:: Exiting and the exit status @end menu @node Varieties of Unportability @section Varieties of Unportability @cindex portability Autoconf tests and ordinary programs often need to test what is allowed on a system, and therefore they may need to deliberately exceed the boundaries of what the standards allow, if only to see whether an optional feature is present. When you write such a program, you should keep in mind the difference between constraints, unspecified behavior, and undefined behavior. In C, a @dfn{constraint} is a rule that the compiler must enforce. An example constraint is that C programs must not declare a bit-field with negative width. Tests can therefore reliably assume that programs with negative-width bit-fields are rejected by a compiler that conforms to the standard. @dfn{Unspecified behavior} is valid behavior, where the standard allows multiple possibilities. For example, the order of evaluation of function arguments is unspecified. Some unspecified behavior is @dfn{implementation-defined}, i.e., documented by the implementation, but since Autoconf tests cannot read the documentation they cannot distinguish between implementation-defined and other unspecified behavior. 
It is common for Autoconf tests to probe implementations to determine otherwise-unspecified behavior. @dfn{Undefined behavior} is invalid behavior, where the standard allows the implementation to do anything it pleases. For example, dereferencing a null pointer leads to undefined behavior. If possible, test programs should avoid undefined behavior, since a program with undefined behavior might succeed on a test that should fail. The above rules apply to programs that are intended to conform to the standard. However, strictly-conforming programs are quite rare, since the standards are so limiting. A major goal of Autoconf is to support programs that use implementation features not described by the standard, and it is fairly common for test programs to violate the above rules, if the programs work well enough in practice. @node Integer Overflow @section Integer Overflow @cindex integer overflow @cindex overflow, signed integer @cindex signed integer overflow @cindex wraparound arithmetic In practice many portable C programs assume that signed integer overflow wraps around reliably using two's complement arithmetic. Yet the C standard says that program behavior is undefined on overflow, and in a few cases C programs do not work on some modern implementations because their overflows do not wrap around as their authors expected. Conversely, in signed integer remainder, the C standard requires overflow behavior that is commonly not implemented. 
@menu * Integer Overflow Basics:: Why integer overflow is a problem * Signed Overflow Examples:: Examples of code assuming wraparound * Optimization and Wraparound:: Optimizations that break uses of wraparound * Signed Overflow Advice:: Practical advice for signed overflow issues * Signed Integer Division:: @code{INT_MIN / -1} and @code{INT_MIN % -1} @end menu @node Integer Overflow Basics @subsection Basics of Integer Overflow @cindex integer overflow @cindex overflow, signed integer @cindex signed integer overflow @cindex wraparound arithmetic In languages like C, unsigned integer overflow reliably wraps around; e.g., @code{UINT_MAX + 1} yields zero. This is guaranteed by the C standard and is portable in practice, unless you specify aggressive, nonstandard optimization options suitable only for special applications. In contrast, the C standard says that signed integer overflow leads to undefined behavior where a program can do anything, including dumping core or overrunning a buffer. The misbehavior can even precede the overflow. Such an overflow can occur during addition, subtraction, multiplication, division, and left shift. Despite this requirement of the standard, many C programs and Autoconf tests assume that signed integer overflow silently wraps around modulo a power of two, using two's complement arithmetic, so long as you cast the resulting value to a signed integer type or store it into a signed integer variable. If you use conservative optimization flags, such programs are generally portable to the vast majority of modern platforms, with a few exceptions discussed later. For historical reasons the C standard also allows implementations with ones' complement or signed magnitude arithmetic, but it is safe to assume two's complement nowadays. Also, overflow can occur when converting an out-of-range value to a signed integer type. Here a standard implementation must define what happens, but this might include raising an exception. 
In practice all known implementations support silent wraparound in this case, so you need not worry about other possibilities. @node Signed Overflow Examples @subsection Examples of Code Assuming Wraparound Overflow @cindex integer overflow @cindex overflow, signed integer @cindex signed integer overflow @cindex wraparound arithmetic There has long been a tension between what the C standard requires for signed integer overflow, and what C programs commonly assume. The standard allows aggressive optimizations based on assumptions that overflow never occurs, but many practical C programs rely on overflow wrapping around. These programs do not conform to the standard, but they commonly work in practice because compiler writers are understandably reluctant to implement optimizations that would break many programs, unless perhaps a user specifies aggressive optimization. The C Standard says that if a program has signed integer overflow its behavior is undefined, and the undefined behavior can even precede the overflow. To take an extreme example: @c Inspired by Robert Dewar's example in @c <https://gcc.gnu.org/ml/gcc/2007-01/msg00038.html> (2007-01-01). @example if (password == expected_password) allow_superuser_privileges (); else if (counter++ == INT_MAX) abort (); else printf ("%d password mismatches\n", counter); @end example @noindent If the @code{int} variable @code{counter} equals @code{INT_MAX}, @code{counter++} must overflow and the behavior is undefined, so the C standard allows the compiler to optimize away the test against @code{INT_MAX} and the @code{abort} call. Worse, if an earlier bug in the program lets the compiler deduce that @code{counter == INT_MAX} or that @code{counter} previously overflowed, the C standard allows the compiler to optimize away the password test and generate code that allows superuser privileges unconditionally. 
Despite this requirement by the standard, it has long been common for C code to assume wraparound arithmetic after signed overflow, and all known practical C implementations support some C idioms that assume wraparound signed arithmetic, even if the idioms do not conform strictly to the standard. If your code looks like the following examples it will almost surely work with real-world compilers. Here is an example derived from the 7th Edition Unix implementation of @code{atoi} (1979-01-10): @example char *p; int f, n; @dots{} while (*p >= '0' && *p <= '9') n = n * 10 + *p++ - '0'; return (f ? -n : n); @end example @noindent Even if the input string is in range, on most modern machines this has signed overflow when computing the most negative integer (the @code{-n} overflows) or a value near an extreme integer (the first @code{+} overflows). Here is another example, derived from the 7th Edition implementation of @code{rand} (1979-01-10). Here the programmer expects both multiplication and addition to wrap on overflow: @example static long int randx = 1; @dots{} randx = randx * 1103515245 + 12345; return (randx >> 16) & 077777; @end example In the following example, derived from the GNU C Library 2.5 implementation of @code{mktime} (2006-09-09), the code assumes wraparound arithmetic in @code{+} to detect signed overflow: @example time_t t, t1, t2; int sec_requested, sec_adjustment; @dots{} t1 = t + sec_requested; t2 = t1 + sec_adjustment; if (((t1 < t) != (sec_requested < 0)) | ((t2 < t1) != (sec_adjustment < 0))) return -1; @end example If your code looks like these examples, it is probably safe even though it does not strictly conform to the C standard. This might lead one to believe that one can generally assume wraparound on overflow, but that is not always true, as can be seen in the next section. 
@node Optimization and Wraparound @subsection Optimizations That Break Wraparound Arithmetic @cindex loop induction Compilers sometimes generate code that is incompatible with wraparound integer arithmetic. A simple example is an algebraic simplification: a compiler might translate @code{(i * 2000) / 1000} to @code{i * 2} because it assumes that @code{i * 2000} does not overflow. The translation is not equivalent to the original when overflow occurs: e.g., in the typical case of 32-bit signed two's complement wraparound @code{int}, if @code{i} has type @code{int} and value @code{1073742}, the original expression returns @minus{}2147483 but the optimized version returns the mathematically correct value 2147484. More subtly, loop induction optimizations often exploit the undefined behavior of signed overflow. Consider the following contrived function @code{sumc}: @example int sumc (int lo, int hi) @{ int sum = 0; int i; for (i = lo; i <= hi; i++) sum ^= i * 53; return sum; @} @end example @noindent To avoid multiplying by 53 each time through the loop, an optimizing compiler might internally transform @code{sumc} to the equivalent of the following: @example int transformed_sumc (int lo, int hi) @{ int sum = 0; int hic = hi * 53; int ic; for (ic = lo * 53; ic <= hic; ic += 53) sum ^= ic; return sum; @} @end example @noindent This transformation is allowed by the C standard, but it is invalid for wraparound arithmetic when @code{INT_MAX / 53 < hi}, because then the overflow in computing expressions like @code{hi * 53} can cause the expression @code{i <= hi} to yield a different value from the transformed expression @code{ic <= hic}. For this reason, compilers that use loop induction and similar techniques often do not support reliable wraparound arithmetic when a loop induction variable like @code{ic} is involved. 
Since loop induction variables are generated by the compiler, and are not visible in the source code, it is not always trivial to say whether the problem affects your code. Hardly any code actually depends on wraparound arithmetic in cases like these, so in practice these loop induction optimizations are almost always useful. However, edge cases in this area can cause problems. For example: @example int j; for (j = 1; 0 < j; j *= 2) test (j); @end example @noindent Here, the loop attempts to iterate through all powers of 2 that @code{int} can represent, but the C standard allows a compiler to optimize away the comparison and generate an infinite loop, under the argument that behavior is undefined on overflow. As of this writing this optimization is not done by any production version of GCC with @option{-O2}, but it might be performed by other compilers, or by more aggressive GCC optimization options, and the GCC developers have not decided whether it will continue to work with GCC and @option{-O2}. @node Signed Overflow Advice @subsection Practical Advice for Signed Overflow Issues @cindex integer overflow @cindex overflow, signed integer @cindex signed integer overflow @cindex wraparound arithmetic Ideally the safest approach is to avoid signed integer overflow entirely. For example, instead of multiplying two signed integers, you can convert them to unsigned integers, multiply the unsigned values, then test whether the result is in signed range. Rewriting code in this way will be inconvenient, though, particularly if the signed values might be negative. Also, it may hurt performance. Using unsigned arithmetic to check for overflow is particularly painful to do portably and efficiently when dealing with an integer type like @code{uid_t} whose width and signedness vary from platform to platform. Furthermore, many C applications pervasively assume wraparound behavior and typically it is not easy to find and remove all these assumptions. 
Hence it is often useful to maintain nonstandard code that assumes wraparound on overflow, instead of rewriting the code. The rest of this section attempts to give practical advice for this situation. If your code wants to detect signed integer overflow in @code{sum = a + b}, it is generally safe to use an expression like @code{(sum < a) != (b < 0)}. If your code uses a signed loop index, make sure that the index cannot overflow, along with all signed expressions derived from the index. Here is a contrived example of problematic code with two instances of overflow. @example for (i = INT_MAX - 10; i <= INT_MAX; i++) if (i + 1 < 0) @{ report_overflow (); break; @} @end example @noindent Because of the two overflows, a compiler might optimize away or transform the two comparisons in a way that is incompatible with the wraparound assumption. If your code uses an expression like @code{(i * 2000) / 1000} and you actually want the multiplication to wrap around on overflow, use unsigned arithmetic to do it, e.g., @code{((int) (i * 2000u)) / 1000}. If your code assumes wraparound behavior and you want to insulate it against any GCC optimizations that would fail to support that behavior, you should use GCC's @option{-fwrapv} option, which causes signed overflow to wrap around reliably (except for division and remainder, as discussed in the next section). If you need to port to platforms where signed integer overflow does not reliably wrap around (e.g., due to hardware overflow checking, or to highly aggressive optimizations), you should consider debugging with GCC's @option{-ftrapv} option, which causes signed overflow to raise an exception. @node Signed Integer Division @subsection Signed Integer Division and Integer Overflow @cindex division, integer Overflow in signed integer division is not always harmless: for example, on CPUs of the i386 family, dividing @code{INT_MIN} by @code{-1} yields a SIGFPE signal which by default terminates the program. 
Worse, taking the remainder of these two values typically yields the same signal on these CPUs, even though the C standard requires @code{INT_MIN % -1} to yield zero because the expression does not overflow. @node Preprocessor Arithmetic @section Preprocessor Arithmetic @cindex preprocessor arithmetic In C99 and later, preprocessor arithmetic, used for @code{#if} expressions, must be evaluated as if all signed values are of type @code{intmax_t} and all unsigned values of type @code{uintmax_t}. Many compilers are buggy in this area, though. For example, as of 2007, Sun C mishandles @code{#if LLONG_MIN < 0} on a platform with 32-bit @code{long int} and 64-bit @code{long long int}. Also, some older preprocessors mishandle constants ending in @code{LL}. To work around these problems, you can compute the value of expressions like @code{LONG_MAX < LLONG_MAX} at @code{configure}-time rather than at @code{#if}-time. @node Null Pointers @section Properties of Null Pointers @cindex null pointers Most modern hosts reliably fail when you attempt to dereference a null pointer. On almost all modern hosts, null pointers use an all-bits-zero internal representation, so you can reliably use @code{memset} with 0 to set all the pointers in an array to null values. If @code{p} is a null pointer to an object type, the C expression @code{p + 0} always evaluates to @code{p} on modern hosts, even though the standard says that it has undefined behavior. @node Buffer Overruns @section Buffer Overruns and Subscript Errors @cindex buffer overruns Buffer overruns and subscript errors are the most common dangerous errors in C programs. They result in undefined behavior because storing outside an array typically modifies storage that is used by some other object, and most modern systems lack runtime checks to catch these errors. Programs should not rely on buffer overruns being caught. There is one exception to the usual rule that a portable program cannot address outside an array. 
In C, it is valid to compute the address just past an object, e.g., @code{&a[N]} where @code{a} has @code{N} elements, so long as you do not dereference the resulting pointer. But it is not valid to compute the address just before an object, e.g., @code{&a[-1]}; nor is it valid to compute two past the end, e.g., @code{&a[N+1]}. On most platforms @code{&a[-1] < &a[0] && &a[N] < &a[N+1]}, but this is not reliable in general, and it is usually easy enough to avoid the potential portability problem, e.g., by allocating an extra unused array element at the start or end. @uref{https://@/www.valgrind.org/, Valgrind} can catch many overruns. GCC users might also consider using the @option{-fsanitize=} options to catch overruns. @xref{Instrumentation Options, , Program Instrumentation Options, gcc, Using the GNU Compiler Collection (GCC)}. Buffer overruns are usually caused by off-by-one errors, but there are more subtle ways to get them. Using @code{int} values to index into an array or compute array sizes causes problems on typical 64-bit hosts where an array index might be @math{2^{31}} or larger. Index values of type @code{size_t} avoid this problem, but cannot be negative. Index values of type @code{ptrdiff_t} are signed, and are wide enough in practice. If you add or multiply two numbers to calculate an array size, e.g., @code{malloc (x * sizeof y + z)}, havoc ensues if the addition or multiplication overflows. Many implementations of the @code{alloca} function silently misbehave and can generate buffer overflows if given sizes that are too large. The size limits are implementation dependent, but are at least 4000 bytes on all platforms that we know about. The standard functions @code{asctime}, @code{asctime_r}, @code{ctime}, @code{ctime_r}, and @code{gets} are prone to buffer overflows, and portable code should not use them unless the inputs are known to be within certain limits. 
The time-related functions can overflow their buffers if given timestamps out of range (e.g., a year less than -999 or greater than 9999). Time-related buffer overflows cannot happen with recent-enough versions of the GNU C library, but are possible with other implementations. The @code{gets} function is the worst, since it almost invariably overflows its buffer when presented with an input line larger than the buffer. @node Volatile Objects @section Volatile Objects @cindex volatile objects The keyword @code{volatile} is often misunderstood in portable code. Its use inhibits some memory-access optimizations, but programmers often wish that it had a different meaning than it actually does. @code{volatile} was designed for code that accesses special objects like memory-mapped device registers whose contents spontaneously change. Such code is inherently low-level, and it is difficult to specify portably what @code{volatile} means in these cases. The C standard says, ``What constitutes an access to an object that has volatile-qualified type is implementation-defined,'' so in theory each implementation is supposed to fill in the gap by documenting what @code{volatile} means for that implementation. In practice, though, this documentation is usually absent or incomplete. One area of confusion is the distinction between objects defined with volatile types, and volatile lvalues. From the C standard's point of view, an object defined with a volatile type has externally visible behavior. You can think of such objects as having little oscilloscope probes attached to them, so that the user can observe some properties of accesses to them, just as the user can observe data written to output files. However, the standard does not make it clear whether users can observe accesses by volatile lvalues to ordinary objects. For example: @example /* Declare and access a volatile object. Accesses to X are "visible" to users. 
*/ static int volatile x; x = 1; /* Access two ordinary objects via a volatile lvalue. It's not clear whether accesses to *P are "visible". */ int y; int *z = malloc (sizeof (int)); int volatile *p; p = &y; *p = 1; p = z; *p = 1; @end example Programmers often wish that @code{volatile} meant ``Perform the memory access here and now, without merging several memory accesses, without changing the memory word size, and without reordering.'' But the C standard does not require this. For objects defined with a volatile type, accesses must be done before the next sequence point; but otherwise merging, reordering, and word-size change is allowed. Worse, it is not clear from the standard whether volatile lvalues provide more guarantees in general than nonvolatile lvalues, if the underlying objects are ordinary. Even when accessing objects defined with a volatile type, the C standard allows only extremely limited signal handlers: in C99 the behavior is undefined if a signal handler reads any non-local object, or writes to any non-local object whose type is not @code{sig_atomic_t volatile}, or calls any standard library function other than @code{abort}, @code{signal}, and @code{_Exit}. Hence C compilers need not worry about a signal handler disturbing ordinary computation. C11 and Posix allow some additional behavior in a portable signal handler, but are still quite restrictive. Some C implementations allow memory-access optimizations within each translation unit, such that actual behavior agrees with the behavior required by the standard only when calling a function in some other translation unit, and a signal handler acts like it was called from a different translation unit. 
The C99 standard hints that in these implementations, objects referred to by signal handlers ``would require explicit specification of @code{volatile} storage, as well as other implementation-defined restrictions.'' But unfortunately even for this special case these other restrictions are often not documented well. This area was significantly changed in C11, and eventually implementations will probably head in the C11 direction, but this will take some time. @xref{Volatiles, , When is a Volatile Object Accessed?, gcc, Using the GNU Compiler Collection (GCC)}, for some restrictions imposed by GCC. @xref{Defining Handlers, , Defining Signal Handlers, libc, The GNU C Library}, for some restrictions imposed by the GNU C library. Restrictions differ on other platforms. If possible, it is best to use a signal handler that fits within the limits imposed by the C and Posix standards. If this is not practical, you can try the following rules of thumb. A signal handler should access only volatile lvalues, preferably lvalues that refer to objects defined with a volatile type, and should not assume that the accessed objects have an internally consistent state if they are larger than a machine word. Furthermore, installers should employ compilers and compiler options that are commonly used for building operating system kernels, because kernels often need more from @code{volatile} than the C Standard requires, and installers who compile an application in a similar environment can sometimes benefit from the extra constraints imposed by kernels on compilers. Admittedly we are hand-waving somewhat here, as there are few guarantees in this area; the rules of thumb may help to fix some bugs but there is a good chance that they will not fix them all. For @code{volatile}, C++ has the same problems that C does. Multithreaded applications have even more problems with @code{volatile}, but they are beyond the scope of this section. 
The bottom line is that using @code{volatile} typically hurts performance but should not hurt correctness. In some cases its use does help correctness, but these cases are often so poorly understood that all too often adding @code{volatile} to a data structure merely alleviates some symptoms of a bug while not fixing the bug in general. @node Floating Point Portability @section Floating Point Portability @cindex floating point Almost all modern systems use IEEE-754 floating point, and it is safe to assume IEEE-754 in most portable code these days. For more information, please see David Goldberg's classic paper @uref{http://@/www.validlab.com/@/goldberg/@/paper.pdf, What Every Computer Scientist Should Know About Floating-Point Arithmetic}. @node Exiting Portably @section Exiting Portably @cindex exiting portably A C or C++ program can exit with status @var{N} by returning @var{N} from the @code{main} function. Portable programs are supposed to exit either with status 0 or @code{EXIT_SUCCESS} to succeed, or with status @code{EXIT_FAILURE} to fail, but in practice it is portable to fail by exiting with status 1, and test programs that assume Posix can fail by exiting with status values from 1 through 255. Programs on SunOS 2.0 (1985) through 3.5.2 (1988) incorrectly exited with zero status when @code{main} returned nonzero, but ancient systems like these are no longer of practical concern. A program can also exit with status @var{N} by passing @var{N} to the @code{exit} function, and a program can fail by calling the @code{abort} function. If a program is specialized to just some platforms, it can fail by calling functions specific to those platforms, e.g., @code{_exit} (Posix). However, like other functions, an exit function should be declared, typically by including a header. For example, if a C program calls @code{exit}, it should include @file{stdlib.h} either directly or via the default includes (@pxref{Default Includes}). 
A program can fail due to undefined behavior such as dereferencing a null pointer, but this is not recommended as undefined behavior allows an implementation to do whatever it pleases and this includes exiting successfully. @c ================================================== Manual Configuration @node Manual Configuration @chapter Manual Configuration A few kinds of features can't be guessed automatically by running test programs. For example, the details of the object-file format, or special options that need to be passed to the compiler or linker. Autoconf provides a uniform method for handling unguessable features, by giving each operating system a @dfn{canonical system type}, also known as a @dfn{canonical name} or @dfn{target triplet}. @prindex @command{config.guess} @prindex @command{config.sub} If you use any of the macros described in this chapter, you must distribute the helper scripts @command{config.guess} and @command{config.sub} along with your source code. Some Autoconf macros use these macros internally, so you may need to distribute these scripts even if you do not use any of these macros yourself. @xref{Input}, for information about the @code{AC_CONFIG_AUX_DIR} macro which you can use to control in which directory @command{configure} looks for helper scripts, and where to get the scripts from. @menu * Specifying Target Triplets:: Specifying target triplets * Canonicalizing:: Getting the canonical system type * Using System Type:: What to do with the system type @end menu @node Specifying Target Triplets @section Specifying target triplets @cindex System type @cindex Target triplet @c This node used to be named Specifying Names. The @anchor allows old @c links to still work. 
@anchor{Specifying Names} Autoconf-generated @command{configure} scripts can make decisions based on a canonical name for the system type, or @dfn{target triplet}, which has the form: @samp{@var{cpu}-@var{vendor}-@var{os}}, where @var{os} can be @samp{@var{system}} or @samp{@var{kernel}-@var{system}} @command{configure} can usually guess the canonical name for the type of system it's running on. To do so it runs a script called @command{config.guess}, which infers the name using the @code{uname} command or symbols predefined by the C preprocessor. Alternately, the user can specify the system type with command line arguments to @command{configure} (@pxref{System Type}. Doing so is necessary when cross-compiling. In the most complex case of cross-compiling, three system types are involved. The options to specify them are: @table @option @item --build=@var{build-type} the type of system on which the package is being configured and compiled. It defaults to the result of running @command{config.guess}. Specifying a @var{build-type} that differs from @var{host-type} enables cross-compilation mode. @item --host=@var{host-type} the type of system on which the package runs. By default it is the same as the build machine. The tools that get used to build and manipulate binaries will, by default, all be prefixed with @code{@var{host-type}-}, such as @code{@var{host-type}-gcc}, @code{@var{host-type}-g++}, @code{@var{host-type}-ar}, and @code{@var{host-type}-nm}. If the binaries produced by these tools can be executed by the build system, the configure script will make use of it in @code{AC_RUN_IFELSE} invocations; otherwise, cross-compilation mode is enabled. Specifying a @var{host-type} that differs from @var{build-type}, when @var{build-type} was also explicitly specified, equally enables cross-compilation mode. @item --target=@var{target-type} the type of system for which any compiler tools in the package produce code (rarely needed). By default, it is the same as host. 
@end table If you mean to override the result of @command{config.guess} but still produce binaries for the build machine, use @option{--build}, not @option{--host}. So, for example, to produce binaries for 64-bit MinGW, use a command like this: @example ./configure --host=x86_64-w64-mingw64 @end example If your system has the ability to execute MinGW binaries but you don't want to make use of this feature and instead prefer cross-compilation guesses, use a command like this: @example ./configure --build=x86_64-pc-linux-gnu --host=x86_64-w64-mingw64 @end example @noindent Note that if you do not specify @option{--host}, @command{configure} fails if it can't run the code generated by the specified compiler. For example, configuring as follows fails: @example ./configure CC=x86_64-w64-mingw64-gcc @end example When cross-compiling, @command{configure} will warn about any tools (compilers, linkers, assemblers) whose name is not prefixed with the host type. This is an aid to users performing cross-compilation. Continuing the example above, if a cross-compiler named @command{cc} is used with a native @command{pkg-config}, then libraries found by @command{pkg-config} will likely cause subtle build failures; but using the names @command{x86_64-w64-mingw64-gcc} and @command{x86_64-w64-mingw64-pkg-config} avoids any confusion. Avoiding the warning is as simple as creating the correct symlinks naming the cross tools. @cindex @command{config.sub} @command{configure} recognizes short aliases for many system types; for example, @samp{decstation} can be used instead of @samp{mips-dec-ultrix4.2}. @command{configure} runs a script called @command{config.sub} to canonicalize system type aliases. This section deliberately omits the description of the obsolete interface; see @ref{Hosts and Cross-Compilation}. 
@node Canonicalizing @section Getting the Canonical System Type @cindex System type @cindex Canonical system type The following macros make the system type available to @command{configure} scripts. @ovindex build_alias @ovindex host_alias @ovindex target_alias The variables @samp{build_alias}, @samp{host_alias}, and @samp{target_alias} are always exactly the arguments of @option{--build}, @option{--host}, and @option{--target}; in particular, they are left empty if the user did not use them, even if the corresponding @code{AC_CANONICAL} macro was run. Any configure script may use these variables anywhere. These are the variables that should be used when in interaction with the user. If you need to recognize some special environments based on their system type, run the following macros to get canonical system names. These variables are not set before the macro call. @defmac AC_CANONICAL_BUILD @acindex{CANONICAL_BUILD} @ovindex build @ovindex build_cpu @ovindex build_vendor @ovindex build_os Compute the canonical build-system type variable, @code{build}, and its three individual parts @code{build_cpu}, @code{build_vendor}, and @code{build_os}. If @option{--build} was specified, then @code{build} is the canonicalization of @code{build_alias} by @command{config.sub}, otherwise it is determined by the shell script @command{config.guess}. @end defmac @defmac AC_CANONICAL_HOST @acindex{CANONICAL_HOST} @ovindex host @ovindex host_cpu @ovindex host_vendor @ovindex host_os Compute the canonical host-system type variable, @code{host}, and its three individual parts @code{host_cpu}, @code{host_vendor}, and @code{host_os}. If @option{--host} was specified, then @code{host} is the canonicalization of @code{host_alias} by @command{config.sub}, otherwise it defaults to @code{build}. 
@end defmac @defmac AC_CANONICAL_TARGET @acindex{CANONICAL_TARGET} @ovindex target @ovindex target_cpu @ovindex target_vendor @ovindex target_os Compute the canonical target-system type variable, @code{target}, and its three individual parts @code{target_cpu}, @code{target_vendor}, and @code{target_os}. If @option{--target} was specified, then @code{target} is the canonicalization of @code{target_alias} by @command{config.sub}, otherwise it defaults to @code{host}. @end defmac Note that there can be artifacts due to the backward compatibility code. @xref{Hosts and Cross-Compilation}, for more. @node Using System Type @section Using the System Type In @file{configure.ac} the system type is generally used by one or more @code{case} statements to select system-specifics. Shell wildcards can be used to match a group of system types. For example, an extra assembler code object file could be chosen, giving access to a CPU cycle counter register. @code{$(CYCLE_OBJ)} in the following would be used in a makefile to add the object to a program or library. @example AS_CASE([$host], [alpha*-*-*], [CYCLE_OBJ=rpcc.o], [i?86-*-*], [CYCLE_OBJ=rdtsc.o], [CYCLE_OBJ=""] ) AC_SUBST([CYCLE_OBJ]) @end example @code{AC_CONFIG_LINKS} (@pxref{Configuration Links}) is another good way to select variant source files, for example optimized code for some CPUs. The configured CPU type doesn't always indicate exact CPU types, so some runtime capability checks may be necessary too. @example case $host in alpha*-*-*) AC_CONFIG_LINKS([dither.c:alpha/dither.c]) ;; powerpc*-*-*) AC_CONFIG_LINKS([dither.c:powerpc/dither.c]) ;; *-*-*) AC_CONFIG_LINKS([dither.c:generic/dither.c]) ;; esac @end example The host system type can also be used to find cross-compilation tools with @code{AC_CHECK_TOOL} (@pxref{Generic Programs}). The above examples all show @samp{$host}, since this is where the code is going to run. Only rarely is it necessary to test @samp{$build} (which is where the build is being done). 
Whenever you're tempted to use @samp{$host} it's worth considering whether some sort of probe would be better. New system types come along periodically or previously missing features are added. Well-written probes can adapt themselves to such things, but hard-coded lists of names can't. Here are some guidelines, @itemize @bullet @item Availability of libraries and library functions should always be checked by probing. @item Variant behavior of system calls is best identified with runtime tests if possible, but bug workarounds or obscure difficulties might have to be driven from @samp{$host}. @item Assembler code is inevitably highly CPU-specific and is best selected according to @samp{$host_cpu}. @item Assembler variations like underscore prefix on globals or ELF versus COFF type directives are however best determined by probing, perhaps even examining the compiler output. @end itemize @samp{$target} is for use by a package creating a compiler or similar. For ordinary packages it's meaningless and should not be used. It indicates what the created compiler should generate code for, if it can cross-compile. @samp{$target} generally selects various hard-coded CPU and system conventions, since usually the compiler or tools under construction themselves determine how the target works. @c ===================================================== Site Configuration. @node Site Configuration @chapter Site Configuration @command{configure} scripts support several kinds of local configuration decisions. There are ways for users to specify where external software packages are, include or exclude optional features, install programs under modified names, and set default values for @command{configure} options. 
@menu * Help Formatting:: Customizing @samp{configure --help} * External Software:: Working with other optional software * Package Options:: Selecting optional features * Pretty Help Strings:: Formatting help string * Option Checking:: Controlling checking of @command{configure} options * Site Details:: Configuring site details * Transforming Names:: Changing program names when installing * Site Defaults:: Giving @command{configure} local defaults @end menu @node Help Formatting @section Controlling Help Output Users consult @samp{configure --help} to learn of configuration decisions specific to your package. By default, @command{configure} breaks this output into sections for each type of option; within each section, help strings appear in the order @file{configure.ac} defines them: @example Optional Features: @dots{} --enable-bar include bar Optional Packages: @dots{} --with-foo use foo @end example @defmac AC_PRESERVE_HELP_ORDER @acindex{PRESERVE_HELP_ORDER} Request an alternate @option{--help} format, in which options of all types appear together, in the order defined. Call this macro before any @code{AC_ARG_ENABLE} or @code{AC_ARG_WITH}. @example Optional Features and Packages: @dots{} --enable-bar include bar --with-foo use foo @end example @end defmac @node External Software @section Working With External Software @cindex External software Some packages require, or can optionally use, other software packages that are already installed. The user can give @command{configure} command line options to specify which such external software to use. The options have one of these forms: @c FIXME: Can't use @ovar here, Texinfo 4.0 goes lunatic and emits something @c awful. @example --with-@var{package}@r{[}=@var{arg}@r{]} --without-@var{package} @end example For example, @option{--with-gnu-ld} means work with the GNU linker instead of some other linker. @option{--with-x} means work with The X Window System. 
The user can give an argument by following the package name with @samp{=} and the argument. Giving an argument of @samp{no} is for packages that are used by default; it says to @emph{not} use the package. An argument that is neither @samp{yes} nor @samp{no} could include a name or number of a version of the other package, to specify more precisely which other package this program is supposed to work with. If no argument is given, it defaults to @samp{yes}. @option{--without-@var{package}} is equivalent to @option{--with-@var{package}=no}. Normally @command{configure} scripts complain about @option{--with-@var{package}} options that they do not support. @xref{Option Checking}, for details, and for how to override the defaults. For each external software package that may be used, @file{configure.ac} should call @code{AC_ARG_WITH} to detect whether the @command{configure} user asked to use it. Whether each package is used or not by default, and which arguments are valid, is up to you. @anchor{AC_ARG_WITH} @defmac AC_ARG_WITH (@var{package}, @var{help-string}, @ @ovar{action-if-given}, @ovar{action-if-not-given}) @acindex{ARG_WITH} If the user gave @command{configure} the option @option{--with-@var{package}} or @option{--without-@var{package}}, run shell commands @var{action-if-given}. If neither option was given, run shell commands @var{action-if-not-given}. The name @var{package} indicates another software package that this program should work with. It should consist only of alphanumeric characters, dashes, plus signs, and dots. The option's argument is available to the shell commands @var{action-if-given} in the shell variable @code{withval}, which is actually just the value of the shell variable named @code{with_@var{package}}, with any non-alphanumeric characters in @var{package} changed into @samp{_}. You may use that variable instead, if you wish. Note that @var{action-if-not-given} is not expanded until the point that @code{AC_ARG_WITH} was expanded. 
If you need the value of @code{with_@var{package}} set to a default value by the time argument parsing is completed, use @code{m4_divert_text} to the @code{DEFAULTS} diversion (@pxref{m4_divert_text}) (if done as an argument to @code{AC_ARG_WITH}, also provide non-diverted text to avoid a shell syntax error). The argument @var{help-string} is a description of the option that looks like this: @example --with-readline support fancy command line editing @end example @noindent @var{help-string} may be more than one line long, if more detail is needed. Just make sure the columns line up in @samp{configure --help}. Avoid tabs in the help string. The easiest way to provide the proper leading whitespace is to format your @var{help-string} with the macro @code{AS_HELP_STRING} (@pxref{Pretty Help Strings}). The following example shows how to use the @code{AC_ARG_WITH} macro in a common situation. You want to let the user decide whether to enable support for an external library (e.g., the readline library); if the user specified neither @option{--with-readline} nor @option{--without-readline}, you want to enable support for readline only if the library is available on the system. @c FIXME: Remove AS_IF when the problem of AC_REQUIRE within `if' is solved. 
@example AC_ARG_WITH([readline], [AS_HELP_STRING([--with-readline], [support fancy command line editing @@<:@@default=check@@:>@@])], [], [: m4_divert_text([DEFAULTS], [with_readline=check])]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [if test "x$with_readline" != xcheck; then AC_MSG_FAILURE( [--with-readline was given, but test for readline failed]) fi ], -lncurses)]) @end example The next example shows how to use @code{AC_ARG_WITH} to give the user the possibility to enable support for the readline library, in case it is still experimental and not well tested, and is therefore disabled by default. @c FIXME: Remove AS_IF when the problem of AC_REQUIRE within `if' is solved. @example AC_ARG_WITH([readline], [AS_HELP_STRING([--with-readline], [enable experimental support for readline])], [], [with_readline=no]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [AC_MSG_FAILURE( [--with-readline was given, but test for readline failed])], [-lncurses])]) @end example The last example shows how to use @code{AC_ARG_WITH} to give the user the possibility to disable support for the readline library, given that it is an important feature and that it should be enabled by default. @c FIXME: Remove AS_IF when the problem of AC_REQUIRE within `if' is solved. 
@example AC_ARG_WITH([readline], [AS_HELP_STRING([--without-readline], [disable support for readline])], [], [with_readline=yes]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [AC_MSG_FAILURE( [readline test failed (--without-readline to disable)])], [-lncurses])]) @end example These three examples can be easily adapted to the case where @code{AC_ARG_ENABLE} should be preferred to @code{AC_ARG_WITH} (see @ref{Package Options}). @end defmac @node Package Options @section Choosing Package Options @cindex Package options @cindex Options, package If a software package has optional compile-time features, the user can give @command{configure} command line options to specify whether to compile them. The options have one of these forms: @c FIXME: Can't use @ovar here, Texinfo 4.0 goes lunatic and emits something @c awful. @example --enable-@var{feature}@r{[}=@var{arg}@r{]} --disable-@var{feature} @end example These options allow users to choose which optional features to build and install. @option{--enable-@var{feature}} options should never make a feature behave differently or cause one feature to replace another. They should only cause parts of the program to be built rather than left out. The user can give an argument by following the feature name with @samp{=} and the argument. Giving an argument of @samp{no} requests that the feature @emph{not} be made available. A feature with an argument looks like @option{--enable-debug=stabs}. If no argument is given, it defaults to @samp{yes}. @option{--disable-@var{feature}} is equivalent to @option{--enable-@var{feature}=no}. Normally @command{configure} scripts complain about @option{--enable-@var{package}} options that they do not support. @xref{Option Checking}, for details, and for how to override the defaults. 
For each optional feature, @file{configure.ac} should call @code{AC_ARG_ENABLE} to detect whether the @command{configure} user asked to include it. Whether each feature is included or not by default, and which arguments are valid, is up to you. @anchor{AC_ARG_ENABLE} @defmac AC_ARG_ENABLE (@var{feature}, @var{help-string}, @ @ovar{action-if-given}, @ovar{action-if-not-given}) @acindex{ARG_ENABLE} If the user gave @command{configure} the option @option{--enable-@var{feature}} or @option{--disable-@var{feature}}, run shell commands @var{action-if-given}. If neither option was given, run shell commands @var{action-if-not-given}. The name @var{feature} indicates an optional user-level facility. It should consist only of alphanumeric characters, dashes, plus signs, and dots. The option's argument is available to the shell commands @var{action-if-given} in the shell variable @code{enableval}, which is actually just the value of the shell variable named @code{enable_@var{feature}}, with any non-alphanumeric characters in @var{feature} changed into @samp{_}. You may use that variable instead, if you wish. The @var{help-string} argument is like that of @code{AC_ARG_WITH} (@pxref{External Software}). Note that @var{action-if-not-given} is not expanded until the point that @code{AC_ARG_ENABLE} was expanded. If you need the value of @code{enable_@var{feature}} set to a default value by the time argument parsing is completed, use @code{m4_divert_text} to the @code{DEFAULTS} diversion (@pxref{m4_divert_text}) (if done as an argument to @code{AC_ARG_ENABLE}, also provide non-diverted text to avoid a shell syntax error). You should format your @var{help-string} with the macro @code{AS_HELP_STRING} (@pxref{Pretty Help Strings}). See the examples suggested with the definition of @code{AC_ARG_WITH} (@pxref{External Software}) to get an idea of possible applications of @code{AC_ARG_ENABLE}. 
@end defmac @node Pretty Help Strings @section Making Your Help Strings Look Pretty @cindex Help strings Properly formatting the @samp{help strings} which are used in @code{AC_ARG_WITH} (@pxref{External Software}) and @code{AC_ARG_ENABLE} (@pxref{Package Options}) can be challenging. Specifically, you want your own @samp{help strings} to line up in the appropriate columns of @samp{configure --help} just like the standard Autoconf @samp{help strings} do. This is the purpose of the @code{AS_HELP_STRING} macro. @anchor{AS_HELP_STRING} @defmac AS_HELP_STRING (@var{left-hand-side}, @var{right-hand-side} @ @dvar{indent-column, 26}, @dvar{wrap-column, 79}) @asindex{HELP_STRING} Expands into a help string that looks pretty when the user executes @samp{configure --help}. It is typically used in @code{AC_ARG_WITH} (@pxref{External Software}) or @code{AC_ARG_ENABLE} (@pxref{Package Options}). The following example makes this clearer. @example AC_ARG_WITH([foo], [AS_HELP_STRING([--with-foo], [use foo (default is no)])], [use_foo=$withval], [use_foo=no]) @end example Then the last few lines of @samp{configure --help} appear like this: @example --enable and --with options recognized: --with-foo use foo (default is no) @end example Macro expansion is performed on the first argument. However, the second argument of @code{AS_HELP_STRING} is treated as a whitespace separated list of text to be reformatted, and is not subject to macro expansion. Since it is not expanded, it should not be double quoted. @xref{Autoconf Language}, for a more detailed explanation. The @code{AS_HELP_STRING} macro is particularly helpful when the @var{left-hand-side} and/or @var{right-hand-side} are composed of macro arguments, as shown in the following example. Be aware that @var{left-hand-side} may not expand to unbalanced quotes, although quadrigraphs can be used. 
@example AC_DEFUN([MY_ARG_WITH], [AC_ARG_WITH(m4_translit([[$1]], [_], [-]), [AS_HELP_STRING([--with-m4_translit([$1], [_], [-])], [use $1 (default is $2)])], [use_[]$1=$withval], [use_[]$1=$2])]) MY_ARG_WITH([a_b], [no]) @end example @noindent Here, the last few lines of @samp{configure --help} will include: @example --enable and --with options recognized: --with-a-b use a_b (default is no) @end example The parameters @var{indent-column} and @var{wrap-column} were introduced in Autoconf 2.62. Generally, they should not be specified; they exist for fine-tuning of the wrapping. @example AS_HELP_STRING([--option], [description of option]) @result{} --option description of option AS_HELP_STRING([--option], [description of option], [15], [30]) @result{} --option description of @result{} option @end example @end defmac @node Option Checking @section Controlling Checking of @command{configure} Options @cindex Options, Package The @command{configure} script checks its command-line options against a list of known options, like @option{--help} or @option{--config-cache}. An unknown option ordinarily indicates a mistake by the user and @command{configure} halts with an error. However, by default unknown @option{--with-@var{package}} and @option{--enable-@var{feature}} options elicit only a warning, to support configuring entire source trees. Source trees often contain multiple packages with a top-level @command{configure} script that uses the @code{AC_CONFIG_SUBDIRS} macro (@pxref{Subdirectories}). Because the packages generally support different @option{--with-@var{package}} and @option{--enable-@var{feature}} options, the GNU Coding Standards say they must accept unrecognized options without halting. Even a warning message is undesirable here, so @code{AC_CONFIG_SUBDIRS} automatically disables the warnings. This default behavior may be modified in two ways. 
First, the installer can invoke @code{configure --disable-option-checking} to disable these warnings, or @code{configure --enable-option-checking=fatal} to turn them into fatal errors.
@node Transforming Names @section Transforming Program Names When Installing @cindex Transforming program names @cindex Program names, transforming Autoconf supports changing the names of programs when installing them. In order to use these transformations, @file{configure.ac} must call the macro @code{AC_ARG_PROGRAM}. @defmac AC_ARG_PROGRAM @acindex{ARG_PROGRAM} @ovindex program_transform_name Place in output variable @code{program_transform_name} a sequence of @code{sed} commands for changing the names of installed programs. If any of the options described below are given to @command{configure}, program names are transformed accordingly. Otherwise, if @code{AC_CANONICAL_TARGET} has been called and a @option{--target} value is given, the target type followed by a dash is used as a prefix. Otherwise, no program name transformation is done. @end defmac @menu * Transformation Options:: @command{configure} options to transform names * Transformation Examples:: Sample uses of transforming names * Transformation Rules:: Makefile uses of transforming names @end menu @node Transformation Options @subsection Transformation Options You can specify name transformations by giving @command{configure} these command line options: @table @option @item --program-prefix=@var{prefix} prepend @var{prefix} to the names; @item --program-suffix=@var{suffix} append @var{suffix} to the names; @item --program-transform-name=@var{expression} perform @code{sed} substitution @var{expression} on the names. @end table @node Transformation Examples @subsection Transformation Examples These transformations are useful with programs that can be part of a cross-compilation development environment. For example, a cross-assembler running on x86-64 configured with @option{--target=aarch64-linux-gnu} is normally installed as @file{aarch64-linux-gnu-as}, rather than @file{as}, which could be confused with a native x86-64 assembler. 
You can force a program name to begin with @file{g}, if you don't want GNU programs installed on your system to shadow other programs with the same name. For example, if you configure GNU @code{diff} with @option{--program-prefix=g}, then when you run @samp{make install} it is installed as @file{/usr/local/bin/gdiff}. As a more sophisticated example, you could use @example --program-transform-name='s/^/g/; s/^gg/g/; s/^gless/less/' @end example @noindent to prepend @samp{g} to most of the program names in a source tree, excepting those like @code{gdb} that already have one and those like @code{less} and @code{lesskey} that aren't GNU programs. (That is assuming that you have a source tree containing those programs that is set up to use this feature.) One way to install multiple versions of some programs simultaneously is to append a version number to the name of one or both. For example, if you want to keep Autoconf version 1 around for awhile, you can configure Autoconf version 2 using @option{--program-suffix=2} to install the programs as @file{/usr/local/bin/autoconf2}, @file{/usr/local/bin/autoheader2}, etc. Nevertheless, pay attention that only the binaries are renamed, therefore you'd have problems with the library files which might overlap. @node Transformation Rules @subsection Transformation Rules Here is how to use the variable @code{program_transform_name} in a @file{Makefile.in}: @example PROGRAMS = cp ls rm transform = @@program_transform_name@@ install: for p in $(PROGRAMS); do \ $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p | \ sed '$(transform)'`; \ done uninstall: for p in $(PROGRAMS); do \ rm -f $(DESTDIR)$(bindir)/`echo $$p | sed '$(transform)'`; \ @c $$ restore font-lock done @end example It is guaranteed that @code{program_transform_name} is never empty, and that there are no useless separators. 
Therefore you may safely embed @code{program_transform_name} within a sed program using @samp{;}: @example transform = @@program_transform_name@@ transform_exe = s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/ @end example Whether to do the transformations on documentation files (Texinfo or @code{man}) is a tricky question; there seems to be no perfect answer, due to the several reasons for name transforming. Documentation is not usually particular to a specific architecture, and Texinfo files do not conflict with system documentation. But they might conflict with earlier versions of the same files, and @code{man} pages sometimes do conflict with system documentation. As a compromise, it is probably best to do name transformations on @code{man} pages but not on Texinfo manuals. @node Site Defaults @section Setting Site Defaults @cindex Site defaults @cindex config.site Autoconf-generated @command{configure} scripts allow your site to provide default values for some configuration values. You do this by creating site- and system-wide initialization files. @evindex CONFIG_SITE If the environment variable @code{CONFIG_SITE} is set, @command{configure} uses its value as a space-separated list of shell scripts to read; it is recommended that these be absolute file names. Otherwise, it reads the shell script @file{@var{prefix}/share/config.site} if it exists, then @file{@var{prefix}/etc/config.site} if it exists. Thus, settings in machine-specific files override those in machine-independent ones in case of conflict. Site files can be arbitrary shell scripts, but only certain kinds of code are really appropriate to be in them. Because @command{configure} reads any cache file after it has read any site files, a site file can define a default cache file to be shared between all Autoconf-generated @command{configure} scripts run on that system (@pxref{Cache Files}). 
If you set a default cache file in a site file, it is a good idea to also set the output variable @code{CC} in that site file, because the cache file is only valid for a particular compiler, but many systems have several available. You can examine or override the value set by a command line option to @command{configure} in a site file; options set shell variables that have the same names as the options, with any dashes turned into underscores. The exceptions are that @option{--without-} and @option{--disable-} options are like giving the corresponding @option{--with-} or @option{--enable-} option and the value @samp{no}. Thus, @option{--cache-file=localcache} sets the variable @code{cache_file} to the value @samp{localcache}; @option{--enable-warnings=no} or @option{--disable-warnings} sets the variable @code{enable_warnings} to the value @samp{no}; @option{--prefix=/usr} sets the variable @code{prefix} to the value @samp{/usr}; etc. Site files are also good places to set default values for other output variables, such as @code{CFLAGS}, if you need to give them non-default values: anything you would normally do, repetitively, on the command line. If you use non-default values for @var{prefix} or @var{exec_prefix} (wherever you locate the site file), you can set them in the site file if you specify it with the @code{CONFIG_SITE} environment variable. You can set some cache values in the site file itself. Doing this is useful if you are cross-compiling, where it is impossible to check features that require running a test program. You could ``prime the cache'' by setting those values correctly for that system in @file{@var{prefix}/etc/config.site}. To find out the names of the cache variables you need to set, see the documentation of the respective Autoconf macro. 
If the variables or their semantics are undocumented, you may need to look for shell variables with @samp{_cv_} in their names in the affected @command{configure} scripts, or in the Autoconf M4 source code for those macros; but in that case, their name or semantics may change in a future Autoconf version. The cache file is careful to not override any variables set in the site files. Similarly, you should not override command-line options in the site files. Your code should check that variables such as @code{prefix} and @code{cache_file} have their default values (as set near the top of @command{configure}) before changing them. Here is a sample file @file{/usr/share/local/@/gnu/share/@/config.site}. The command @samp{configure --prefix=/usr/share/local/gnu} would read this file (if @code{CONFIG_SITE} is not set to a different file). @example # /usr/share/local/gnu/share/config.site for configure # # Change some defaults. test "$prefix" = NONE && prefix=/usr/share/local/gnu test "$exec_prefix" = NONE && exec_prefix=/usr/local/gnu test "$sharedstatedir" = '$@{prefix@}/com' && sharedstatedir=/var test "$localstatedir" = '$@{prefix@}/var' && localstatedir=/var test "$runstatedir" = '$@{localstatedir@}/run' && runstatedir=/run # Give Autoconf 2.x generated configure scripts a shared default # cache file for feature test results, architecture-specific. if test "$cache_file" = /dev/null; then cache_file="$prefix/var/config.cache" # A cache file is only valid for one C compiler. CC=gcc fi @end example @c Leave this use of "File system" rendered as one word, but @c slightly obfuscated so as not to trigger the syntax-check prohibition. @cindex File@/system Hierarchy Standard @cindex FHS Another use of @file{config.site} is for priming the directory variables @c "File system", but slightly obfuscated, as above. in a manner consistent with the File@/system Hierarchy Standard (FHS). 
Once the following file is installed at @file{/usr/share/config.site}, a user can execute simply @code{./configure --prefix=/usr} to get all the directories chosen in the locations recommended by FHS. @example # /usr/share/config.site for FHS defaults when installing below /usr, # and the respective settings were not changed on the command line. if test "$prefix" = /usr; then test "$sysconfdir" = '$@{prefix@}/etc' && sysconfdir=/etc test "$sharedstatedir" = '$@{prefix@}/com' && sharedstatedir=/var test "$localstatedir" = '$@{prefix@}/var' && localstatedir=/var fi @end example @cindex @file{lib64} @cindex 64-bit libraries Likewise, on platforms where 64-bit libraries are built by default, then installed in @file{/usr/local/@/lib64} instead of @file{/usr/local/@/lib}, it is appropriate to install @file{/usr/local/@/share/config.site}: @example # /usr/local/share/config.site for platforms that prefer # the directory /usr/local/lib64 over /usr/local/lib. test "$libdir" = '$@{exec_prefix@}/lib' && libdir='$@{exec_prefix@}/lib64' @end example @c ============================================== Running configure Scripts. @node Running configure Scripts @chapter Running @command{configure} Scripts @cindex @command{configure} Below are instructions on how to configure a package that uses a @command{configure} script, suitable for inclusion as an @file{INSTALL} file in the package. A plain-text version of @file{INSTALL} which you may use comes with Autoconf. @menu * Basic Installation:: Instructions for typical cases * Compilers and Options:: Selecting compilers and optimization * Multiple Architectures:: Compiling for multiple architectures at once * Installation Names:: Installing in different directories * Optional Features:: Selecting optional features * Particular Systems:: Particular systems * System Type:: Specifying the system type * Sharing Defaults:: Setting site-wide defaults for @command{configure} * Defining Variables:: Specifying the compiler etc. 
* configure Invocation:: Changing how @command{configure} runs @end menu @set autoconf @include install.texi @c ============================================== config.status Invocation @node config.status Invocation @chapter config.status Invocation @cindex @command{config.status} The @command{configure} script creates a file named @file{config.status}, which actually configures, @dfn{instantiates}, the template files. It also records the configuration options that were specified when the package was last configured in case reconfiguring is needed. Synopsis: @example ./config.status @ovar{option}@dots{} @ovar{tag}@dots{} @end example It configures each @var{tag}; if none are specified, all the templates are instantiated. A @var{tag} refers to a file or other tag associated with a configuration action, as specified by an @code{AC_CONFIG_@var{ITEMS}} macro (@pxref{Configuration Actions}). The files must be specified without their dependencies, as in @example ./config.status foobar @end example @noindent not @example ./config.status foobar:foo.in:bar.in @end example The supported options are: @table @option @item --help @itemx -h Print a summary of the command line options, the list of the template files, and exit. @item --version @itemx -V Print the version number of Autoconf and the configuration settings, and exit. @item --config Print the configuration settings in reusable way, quoted for the shell, and exit. For example, for a debugging build that otherwise reuses the configuration from a different build directory @var{build-dir} of a package in @var{src-dir}, you could use the following: @example args=`@var{build-dir}/config.status --config` eval @var{src-dir}/configure "$args" CFLAGS=-g --srcdir=@var{src-dir} @end example @noindent Note that it may be necessary to override a @option{--srcdir} setting that was saved in the configuration, if the arguments are used in a different build directory. 
@item --silent @itemx --quiet @itemx -q Do not print progress messages. @item --debug @itemx -d Don't remove the temporary files. @item --file=@var{file}[:@var{template}] Require that @var{file} be instantiated as if @samp{AC_CONFIG_FILES(@var{file}:@var{template})} was used. Both @var{file} and @var{template} may be @samp{-} in which case the standard output and/or standard input, respectively, is used. If a @var{template} file name is relative, it is first looked for in the build tree, and then in the source tree. @xref{Configuration Actions}, for more details. This option and the following ones provide one way for separately distributed packages to share the values computed by @command{configure}. Doing so can be useful if some of the packages need a superset of the features that one of them, perhaps a common library, does. These options allow a @file{config.status} file to create files other than the ones that its @file{configure.ac} specifies, so it can be used for a different package, or for extracting a subset of values. For example, @example echo '@@CC@@' | ./config.status --file=- @end example @noindent provides the value of @code{@@CC@@} on standard output. @item --header=@var{file}[:@var{template}] Same as @option{--file} above, but with @samp{AC_CONFIG_HEADERS}. @item --recheck Ask @file{config.status} to update itself and exit (no instantiation). This option is useful if you change @command{configure}, so that the results of some tests might be different from the previous run. The @option{--recheck} option reruns @command{configure} with the same arguments you used before, plus the @option{--no-create} option, which prevents @command{configure} from running @file{config.status} and creating @file{Makefile} and other files, and the @option{--no-recursion} option, which prevents @command{configure} from running other @command{configure} scripts in subdirectories. 
(This is so other Make rules can run @file{config.status} when it changes; @pxref{Automatic Remaking}, for an example). @end table @file{config.status} checks several optional environment variables that can alter its behavior: @anchor{CONFIG_SHELL} @defvar CONFIG_SHELL @evindex CONFIG_SHELL The shell with which to run @command{configure}. It must be Bourne-compatible, and the absolute name of the shell should be passed. The default is a shell that supports @code{LINENO} if available, and @file{/bin/sh} otherwise. @end defvar @defvar CONFIG_STATUS @evindex CONFIG_STATUS The file name to use for the shell script that records the configuration. The default is @file{./config.status}. This variable is useful when one package uses parts of another and the @command{configure} scripts shouldn't be merged because they are maintained separately. @end defvar You can use @file{./config.status} in your makefiles. For example, in the dependencies given above (@pxref{Automatic Remaking}), @file{config.status} is run twice when @file{configure.ac} has changed. If that bothers you, you can make each run only regenerate the files for that rule: @example @group config.h: stamp-h stamp-h: config.h.in config.status ./config.status config.h echo > stamp-h Makefile: Makefile.in config.status ./config.status Makefile @end group @end example The calling convention of @file{config.status} has changed; see @ref{Obsolete config.status Use}, for details. @c =================================================== Obsolete Constructs @node Obsolete Constructs @chapter Obsolete Constructs @cindex Obsolete constructs Autoconf changes, and throughout the years some constructs have been obsoleted. Most of the changes involve the macros, but in some cases the tools themselves, or even some concepts, are now considered obsolete. You may completely skip this chapter if you are new to Autoconf. 
Its intention is mainly to help maintainers updating their packages by understanding how to move to more modern constructs. @menu * Obsolete config.status Use:: Obsolete convention for @command{config.status} * acconfig Header:: Additional entries in @file{config.h.in} * autoupdate Invocation:: Automatic update of @file{configure.ac} * Obsolete Macros:: Backward compatibility macros * Autoconf 1:: Tips for upgrading your files * Autoconf 2.13:: Some fresher tips @end menu @node Obsolete config.status Use @section Obsolete @file{config.status} Invocation @file{config.status} now supports arguments to specify the files to instantiate; see @ref{config.status Invocation}, for more details. Before, environment variables had to be used. @defvar CONFIG_COMMANDS @evindex CONFIG_COMMANDS The tags of the commands to execute. The default is the arguments given to @code{AC_OUTPUT} and @code{AC_CONFIG_COMMANDS} in @file{configure.ac}. @end defvar @defvar CONFIG_FILES @evindex CONFIG_FILES The files in which to perform @samp{@@@var{variable}@@} substitutions. The default is the arguments given to @code{AC_OUTPUT} and @code{AC_CONFIG_FILES} in @file{configure.ac}. @end defvar @defvar CONFIG_HEADERS @evindex CONFIG_HEADERS The files in which to substitute C @code{#define} statements. The default is the arguments given to @code{AC_CONFIG_HEADERS}; if that macro was not called, @file{config.status} ignores this variable. @end defvar @defvar CONFIG_LINKS @evindex CONFIG_LINKS The symbolic links to establish. The default is the arguments given to @code{AC_CONFIG_LINKS}; if that macro was not called, @file{config.status} ignores this variable. 
@end defvar In @ref{config.status Invocation}, using this old interface, the example would be: @example @group config.h: stamp-h stamp-h: config.h.in config.status CONFIG_COMMANDS= CONFIG_LINKS= CONFIG_FILES= \ CONFIG_HEADERS=config.h ./config.status echo > stamp-h Makefile: Makefile.in config.status CONFIG_COMMANDS= CONFIG_LINKS= CONFIG_HEADERS= \ CONFIG_FILES=Makefile ./config.status @end group @end example @noindent (If @file{configure.ac} does not call @code{AC_CONFIG_HEADERS}, there is no need to set @code{CONFIG_HEADERS} in the @command{make} rules. Equally for @code{CONFIG_COMMANDS}, etc.) @node acconfig Header @section @file{acconfig.h} @cindex @file{acconfig.h} @cindex @file{config.h.top} @cindex @file{config.h.bot} In order to produce @file{config.h.in}, @command{autoheader} needs to build or to find templates for each symbol. Modern releases of Autoconf use @code{AH_VERBATIM} and @code{AH_TEMPLATE} (@pxref{Autoheader Macros}), but in older releases a file, @file{acconfig.h}, contained the list of needed templates. @command{autoheader} copied comments and @code{#define} and @code{#undef} statements from @file{acconfig.h} in the current directory, if present. This file used to be mandatory if you @code{AC_DEFINE} any additional symbols. Modern releases of Autoconf also provide @code{AH_TOP} and @code{AH_BOTTOM} if you need to prepend/append some information to @file{config.h.in}. Ancient versions of Autoconf had a similar feature: if @file{./acconfig.h} contains the string @samp{@@TOP@@}, @command{autoheader} copies the lines before the line containing @samp{@@TOP@@} into the top of the file that it generates. Similarly, if @file{./acconfig.h} contains the string @samp{@@BOTTOM@@}, @command{autoheader} copies the lines after that line to the end of the file it generates. Either or both of those strings may be omitted. 
An even older alternate way to produce the same effect in ancient versions of Autoconf is to create the files @file{@var{file}.top} (typically @file{config.h.top}) and/or @file{@var{file}.bot} in the current directory. If they exist, @command{autoheader} copies them to the beginning and end, respectively, of its output. In former versions of Autoconf, the files used in preparing a software package for distribution were: @example @group configure.ac --. .------> autoconf* -----> configure +---+ [aclocal.m4] --+ `---. [acsite.m4] ---' | +--> [autoheader*] -> [config.h.in] [acconfig.h] ----. | +-----' [config.h.top] --+ [config.h.bot] --' @end group @end example Using only the @code{AH_} macros, @file{configure.ac} should be self-contained, and should not depend upon @file{acconfig.h} etc. @node autoupdate Invocation @section Using @command{autoupdate} to Modernize @file{configure.ac} @cindex @command{autoupdate} The @command{autoupdate} program updates a @file{configure.ac} file that calls Autoconf macros by their old names to use the current macro names. In version 2 of Autoconf, most of the macros were renamed to use a more uniform and descriptive naming scheme. @xref{Macro Names}, for a description of the new scheme. Although the old names still work (@pxref{Obsolete Macros}, for a list of the old macros and the corresponding new names), you can make your @file{configure.ac} files more readable and make it easier to use the current Autoconf documentation if you update them to use the new macro names. @evindex SIMPLE_BACKUP_SUFFIX If given no arguments, @command{autoupdate} updates @file{configure.ac}, backing up the original version with the suffix @file{~} (or the value of the environment variable @code{SIMPLE_BACKUP_SUFFIX}, if that is set). If you give @command{autoupdate} an argument, it reads that file instead of @file{configure.ac} and writes the updated file to the standard output. 
@noindent @command{autoupdate} accepts the following options: @table @option @item --help @itemx -h Print a summary of the command line options and exit. @item --version @itemx -V Print the version number of Autoconf and exit. @item --verbose @itemx -v Report processing steps. @item --debug @itemx -d Don't remove the temporary files. @item --force @itemx -f Force the update even if the file has not changed. Disregard the cache. @item --include=@var{dir} @itemx -I @var{dir} Also look for input files in @var{dir}. Multiple invocations accumulate. Directories are browsed from last to first. @item --prepend-include=@var{dir} @itemx -B @var{dir} Prepend directory @var{dir} to the search path. This is used to include the language-specific files before any third-party macros. @end table @node Obsolete Macros @section Obsolete Macros Several macros are obsoleted in Autoconf, for various reasons (typically they failed to quote properly, couldn't be extended for more recent issues, etc.). They are still supported, but deprecated: their use should be avoided. During the jump from Autoconf version 1 to version 2, most of the macros were renamed to use a more uniform and descriptive naming scheme, but their signature did not change. @xref{Macro Names}, for a description of the new naming scheme. Below, if there is just the mapping from old names to new names for these macros, the reader is invited to refer to the definition of the new macro for the signature and the description. @defmac AC_AIX @acindex{AIX} @cvindex _ALL_SOURCE This macro is a platform-specific subset of @code{AC_USE_SYSTEM_EXTENSIONS} (@pxref{AC_USE_SYSTEM_EXTENSIONS}). @end defmac @defmac AC_ALLOCA @acindex{ALLOCA} Replaced by @code{AC_FUNC_ALLOCA} (@pxref{AC_FUNC_ALLOCA}). @end defmac @defmac AC_ARG_ARRAY @acindex{ARG_ARRAY} Removed because of limited usefulness. @end defmac @defmac AC_C_CROSS @acindex{C_CROSS} This macro is obsolete; it does nothing. 
@end defmac @defmac AC_C_LONG_DOUBLE @acindex{C_LONG_DOUBLE} @cvindex HAVE_LONG_DOUBLE If the C compiler supports a working @code{long double} type with more range or precision than the @code{double} type, define @code{HAVE_LONG_DOUBLE}. You should use @code{AC_TYPE_LONG_DOUBLE} or @code{AC_TYPE_LONG_DOUBLE_WIDER} instead. @xref{Particular Types}. @end defmac @defmac AC_CANONICAL_SYSTEM @acindex{CANONICAL_SYSTEM} Determine the system type and set output variables to the names of the canonical system types. @xref{Canonicalizing}, for details about the variables this macro sets. The user is encouraged to use either @code{AC_CANONICAL_BUILD}, or @code{AC_CANONICAL_HOST}, or @code{AC_CANONICAL_TARGET}, depending on the needs. Using @code{AC_CANONICAL_TARGET} is enough to run the two other macros (@pxref{Canonicalizing}). @end defmac @defmac AC_CHAR_UNSIGNED @acindex{CHAR_UNSIGNED} Replaced by @code{AC_C_CHAR_UNSIGNED} (@pxref{AC_C_CHAR_UNSIGNED}). @end defmac @defmac AC_CHECK_TYPE (@var{type}, @var{default}) @acindex{CHECK_TYPE} Autoconf, up to 2.13, used to provide this version of @code{AC_CHECK_TYPE}, deprecated because of its flaws. First, although it is a member of the @code{CHECK} clan, it does more than just checking. Secondly, missing types are defined using @code{#define}, not @code{typedef}, and this can lead to problems in the case of pointer types. This use of @code{AC_CHECK_TYPE} is obsolete and discouraged; see @ref{Generic Types}, for the description of the current macro. If the type @var{type} is not defined, define it to be the C (or C++) builtin type @var{default}, e.g., @samp{short int} or @samp{unsigned int}. 
This macro is equivalent to: @example AC_CHECK_TYPE([@var{type}], [], [AC_DEFINE_UNQUOTED([@var{type}], [@var{default}], [Define to `@var{default}' if <sys/types.h> does not define.])]) @end example In order to keep backward compatibility, the two versions of @code{AC_CHECK_TYPE} are implemented, selected using these heuristics: @enumerate @item If there are three or four arguments, the modern version is used. @item If the second argument appears to be a C or C++ type, then the obsolete version is used. This happens if the argument is a C or C++ @emph{builtin} type or a C identifier ending in @samp{_t}, optionally followed by one of @samp{[(* } and then by a string of zero or more characters taken from the set @samp{[]()* _a-zA-Z0-9}. @item If the second argument is spelled with the alphabet of valid C and C++ types, the user is warned and the modern version is used. @item Otherwise, the modern version is used. @end enumerate @noindent You are encouraged either to use a valid builtin type, or to use the equivalent modern code (see above), or better yet, to use @code{AC_CHECK_TYPES} together with @example #ifndef HAVE_LOFF_T typedef loff_t off_t; #endif @end example @end defmac @c end of AC_CHECK_TYPE @defmac AC_CHECKING (@var{feature-description}) @acindex{CHECKING} Same as @example AC_MSG_NOTICE([checking @var{feature-description}@dots{}] @end example @noindent @xref{AC_MSG_NOTICE}. @end defmac @defmac AC_COMPILE_CHECK (@var{echo-text}, @var{includes}, @ @var{function-body}, @var{action-if-true}, @ovar{action-if-false}) @acindex{COMPILE_CHECK} This is an obsolete version of @code{AC_TRY_COMPILE} itself replaced by @code{AC_COMPILE_IFELSE} (@pxref{Running the Compiler}), with the addition that it prints @samp{checking for @var{echo-text}} to the standard output first, if @var{echo-text} is non-empty. Use @code{AC_MSG_CHECKING} and @code{AC_MSG_RESULT} instead to print messages (@pxref{Printing Messages}). 
@end defmac @defmac AC_CONST @acindex{CONST} Replaced by @code{AC_C_CONST} (@pxref{AC_C_CONST}). @end defmac @defmac AC_CROSS_CHECK @acindex{CROSS_CHECK} Same as @code{AC_C_CROSS}, which is obsolete too, and does nothing @code{:-)}. @end defmac @defmac AC_CYGWIN @acindex{CYGWIN} @evindex CYGWIN Check for the Cygwin environment in which case the shell variable @code{CYGWIN} is set to @samp{yes}. Don't use this macro, the dignified means to check the nature of the host is using @code{AC_CANONICAL_HOST} (@pxref{Canonicalizing}). As a matter of fact this macro is defined as: @example AC_REQUIRE([AC_CANONICAL_HOST])[]dnl case $host_os in *cygwin* ) CYGWIN=yes;; * ) CYGWIN=no;; esac @end example Beware that the variable @env{CYGWIN} has a special meaning when running Cygwin, and should not be changed. That's yet another reason not to use this macro. @end defmac @defmac AC_DECL_SYS_SIGLIST @acindex{DECL_SYS_SIGLIST} @cvindex SYS_SIGLIST_DECLARED Same as: @example AC_CHECK_DECLS([sys_siglist], [], [], [#include <signal.h> /* NetBSD declares sys_siglist in unistd.h. */ #ifdef HAVE_UNISTD_H # include <unistd.h> #endif ]) @end example @noindent @xref{AC_CHECK_DECLS}. @end defmac @defmac AC_DECL_YYTEXT @acindex{DECL_YYTEXT} Does nothing, now integrated in @code{AC_PROG_LEX} (@pxref{AC_PROG_LEX}). @end defmac @defmac AC_DIAGNOSE (@var{category}, @var{message}) @acindex{DIAGNOSE} Replaced by @code{m4_warn} (@pxref{m4_warn}). 
@end defmac @defmac AC_DIR_HEADER @acindex{DIR_HEADER} @cvindex DIRENT @cvindex SYSNDIR @cvindex SYSDIR @cvindex NDIR Like calling @code{AC_FUNC_CLOSEDIR_VOID} (@pxref{AC_FUNC_CLOSEDIR_VOID}) and @code{AC_HEADER_DIRENT} (@pxref{AC_HEADER_DIRENT}), but defines a different set of C preprocessor macros to indicate which header file is found: @multitable {@file{sys/ndir.h}} {Old Symbol} {@code{HAVE_SYS_NDIR_H}} @item Header @tab Old Symbol @tab New Symbol @item @file{dirent.h} @tab @code{DIRENT} @tab @code{HAVE_DIRENT_H} @item @file{sys/ndir.h} @tab @code{SYSNDIR} @tab @code{HAVE_SYS_NDIR_H} @item @file{sys/dir.h} @tab @code{SYSDIR} @tab @code{HAVE_SYS_DIR_H} @item @file{ndir.h} @tab @code{NDIR} @tab @code{HAVE_NDIR_H} @end multitable @end defmac @defmac AC_DYNIX_SEQ @acindex{DYNIX_SEQ} If on DYNIX/ptx, add @option{-lseq} to output variable @code{LIBS}. This macro used to be defined as @example AC_CHECK_LIB([seq], [getmntent], [LIBS="-lseq $LIBS"]) @end example @noindent now it is just @code{AC_FUNC_GETMNTENT} (@pxref{AC_FUNC_GETMNTENT}). @end defmac @defmac AC_EXEEXT @acindex{EXEEXT} @ovindex EXEEXT Defined the output variable @code{EXEEXT} based on the output of the compiler, which is now done automatically. Typically set to empty string if Posix and @samp{.exe} if a DOS variant. @end defmac @defmac AC_EMXOS2 @acindex{EMXOS2} Similar to @code{AC_CYGWIN} but checks for the EMX environment on OS/2 and sets @code{EMXOS2}. Don't use this macro, the dignified means to check the nature of the host is using @code{AC_CANONICAL_HOST} (@pxref{Canonicalizing}). @end defmac @defmac AC_ENABLE (@var{feature}, @var{action-if-given}, @ @ovar{action-if-not-given}) @acindex{ENABLE} This is an obsolete version of @code{AC_ARG_ENABLE} that does not support providing a help string (@pxref{AC_ARG_ENABLE}). @end defmac @defmac AC_ERROR @acindex{ERROR} Replaced by @code{AC_MSG_ERROR} (@pxref{AC_MSG_ERROR}). 
@end defmac @defmac AC_FATAL (@var{message}) @acindex{FATAL} Replaced by @code{m4_fatal} (@pxref{m4_fatal}). @end defmac @defmac AC_FIND_X @acindex{FIND_X} Replaced by @code{AC_PATH_X} (@pxref{AC_PATH_X}). @end defmac @defmac AC_FIND_XTRA @acindex{FIND_XTRA} Replaced by @code{AC_PATH_XTRA} (@pxref{AC_PATH_XTRA}). @end defmac @defmac AC_FOREACH @acindex{FOREACH} Replaced by @code{m4_foreach_w} (@pxref{m4_foreach_w}). @end defmac @defmac AC_FUNC_CHECK @acindex{FUNC_CHECK} Replaced by @code{AC_CHECK_FUNC} (@pxref{AC_CHECK_FUNC}). @end defmac @anchor{AC_FUNC_SETVBUF_REVERSED} @defmac AC_FUNC_SETVBUF_REVERSED @acindex{FUNC_SETVBUF_REVERSED} @cvindex SETVBUF_REVERSED @c @fuindex setvbuf @prindex @code{setvbuf} Do nothing. Formerly, this macro checked whether @code{setvbuf} takes the buffering type as its second argument and the buffer pointer as the third, instead of the other way around, and defined @code{SETVBUF_REVERSED}. However, the last systems to have the problem were those based on SVR2, which became obsolete in 1987, and the macro is no longer needed. @end defmac @defmac AC_FUNC_WAIT3 @acindex{FUNC_WAIT3} @cvindex HAVE_WAIT3 @c @fuindex wait3 @prindex @code{wait3} If @code{wait3} is found and fills in the contents of its third argument (a @samp{struct rusage *}), which HP-UX does not do, define @code{HAVE_WAIT3}. These days portable programs should use @code{waitpid}, not @code{wait3}, as @code{wait3} has been removed from Posix. @end defmac @defmac AC_GCC_TRADITIONAL @acindex{GCC_TRADITIONAL} Replaced by @code{AC_PROG_GCC_TRADITIONAL} (@pxref{AC_PROG_GCC_TRADITIONAL}). @end defmac @defmac AC_GETGROUPS_T @acindex{GETGROUPS_T} Replaced by @code{AC_TYPE_GETGROUPS} (@pxref{AC_TYPE_GETGROUPS}). @end defmac @defmac AC_GETLOADAVG @acindex{GETLOADAVG} Replaced by @code{AC_FUNC_GETLOADAVG} (@pxref{AC_FUNC_GETLOADAVG}). 
@end defmac @defmac AC_GNU_SOURCE @acindex{GNU_SOURCE} @cvindex _GNU_SOURCE This macro is a platform-specific subset of @code{AC_USE_SYSTEM_EXTENSIONS} (@pxref{AC_USE_SYSTEM_EXTENSIONS}). @end defmac @defmac AC_HAVE_FUNCS @acindex{HAVE_FUNCS} Replaced by @code{AC_CHECK_FUNCS} (@pxref{AC_CHECK_FUNCS}). @end defmac @defmac AC_HAVE_HEADERS @acindex{HAVE_HEADERS} Replaced by @code{AC_CHECK_HEADERS} (@pxref{AC_CHECK_HEADERS}). @end defmac @defmac AC_HAVE_LIBRARY (@var{library}, @ovar{action-if-found}, @ @ovar{action-if-not-found}, @ovar{other-libraries}) @acindex{HAVE_LIBRARY} This macro is equivalent to calling @code{AC_CHECK_LIB} with a @var{function} argument of @code{main}. In addition, @var{library} can be written as any of @samp{foo}, @option{-lfoo}, or @samp{libfoo.a}. In all of those cases, the compiler is passed @option{-lfoo}. However, @var{library} cannot be a shell variable; it must be a literal name. @xref{AC_CHECK_LIB}. @end defmac @defmac AC_HAVE_POUNDBANG @acindex{HAVE_POUNDBANG} Replaced by @code{AC_SYS_INTERPRETER} (@pxref{AC_SYS_INTERPRETER}). @end defmac @defmac AC_HEADER_CHECK @acindex{HEADER_CHECK} Replaced by @code{AC_CHECK_HEADER} (@pxref{AC_CHECK_HEADER}). @end defmac @defmac AC_HEADER_EGREP @acindex{HEADER_EGREP} Replaced by @code{AC_EGREP_HEADER} (@pxref{AC_EGREP_HEADER}). @end defmac @anchor{AC_HEADER_TIME} @defmac AC_HEADER_TIME @acindex{HEADER_TIME} @cvindex TIME_WITH_SYS_TIME @hdrindex{time.h} @hdrindex{sys/time.h} @caindex header_time This macro used to check whether it was possible to include @file{time.h} and @file{sys/time.h} in the same source file, defining @code{TIME_WITH_SYS_TIME} if so. Nowadays, it is equivalent to @samp{AC_CHECK_HEADERS([sys/time.h])}, although it does still define @code{TIME_WITH_SYS_TIME} for compatibility's sake. @file{time.h} is universally present, and the systems on which @file{sys/time.h} conflicted with @file{time.h} are obsolete. 
@end defmac @defmac AC_HELP_STRING @acindex{HELP_STRING} Replaced by @code{AS_HELP_STRING} (@pxref{AS_HELP_STRING}). @end defmac @defmac AC_INIT (@var{unique-file-in-source-dir}) @acindex{INIT} Formerly @code{AC_INIT} used to have a single argument, and was equivalent to: @example AC_INIT AC_CONFIG_SRCDIR(@var{unique-file-in-source-dir}) @end example See @ref{AC_INIT} and @ref{AC_CONFIG_SRCDIR}. @end defmac @defmac AC_INLINE @acindex{INLINE} Replaced by @code{AC_C_INLINE} (@pxref{AC_C_INLINE}). @end defmac @defmac AC_INT_16_BITS @acindex{INT_16_BITS} @cvindex INT_16_BITS If the C type @code{int} is 16 bits wide, define @code{INT_16_BITS}. Use @samp{AC_CHECK_SIZEOF(int)} instead (@pxref{AC_CHECK_SIZEOF}). @end defmac @defmac AC_IRIX_SUN @acindex{IRIX_SUN} If on IRIX (Silicon Graphics Unix), add @option{-lsun} to output @code{LIBS}. If you were using it to get @code{getmntent}, use @code{AC_FUNC_GETMNTENT} instead. If you used it for the NIS versions of the password and group functions, use @samp{AC_CHECK_LIB(sun, getpwnam)}. Up to Autoconf 2.13, it used to be @example AC_CHECK_LIB([sun], [getmntent], [LIBS="-lsun $LIBS"]) @end example @noindent now it is defined as @example AC_FUNC_GETMNTENT AC_CHECK_LIB([sun], [getpwnam]) @end example @noindent See @ref{AC_FUNC_GETMNTENT} and @ref{AC_CHECK_LIB}. @end defmac @defmac AC_ISC_POSIX @acindex{ISC_POSIX} @ovindex LIBS This macro adds @option{-lcposix} to output variable @code{LIBS} if necessary for Posix facilities. Sun dropped support for the obsolete INTERACTIVE Systems Corporation Unix on 2006-07-23. New programs need not use this macro. It is implemented as @code{AC_SEARCH_LIBS([strerror], [cposix])} (@pxref{AC_SEARCH_LIBS}). @end defmac @defmac AC_LANG_C @acindex{LANG_C} Same as @samp{AC_LANG([C])} (@pxref{AC_LANG}). @end defmac @defmac AC_LANG_CPLUSPLUS @acindex{LANG_CPLUSPLUS} Same as @samp{AC_LANG([C++])} (@pxref{AC_LANG}). 
@end defmac @defmac AC_LANG_FORTRAN77 @acindex{LANG_FORTRAN77} Same as @samp{AC_LANG([Fortran 77])} (@pxref{AC_LANG}). @end defmac @defmac AC_LANG_RESTORE @acindex{LANG_RESTORE} Select the @var{language} that is saved on the top of the stack, as set by @code{AC_LANG_SAVE}, remove it from the stack, and call @code{AC_LANG(@var{language})}. @xref{Language Choice}, for the preferred way to change languages. @end defmac @defmac AC_LANG_SAVE @acindex{LANG_SAVE} Remember the current language (as set by @code{AC_LANG}) on a stack. The current language does not change. @code{AC_LANG_PUSH} is preferred (@pxref{AC_LANG_PUSH}). @end defmac @defmac AC_LINK_FILES (@var{source}@dots{}, @var{dest}@dots{}) @acindex{LINK_FILES} This is an obsolete version of @code{AC_CONFIG_LINKS} (@pxref{AC_CONFIG_LINKS}. An updated version of: @example AC_LINK_FILES(config/$machine.h config/$obj_format.h, host.h object.h) @end example @noindent is: @example AC_CONFIG_LINKS([host.h:config/$machine.h object.h:config/$obj_format.h]) @end example @end defmac @defmac AC_LN_S @acindex{LN_S} Replaced by @code{AC_PROG_LN_S} (@pxref{AC_PROG_LN_S}). @end defmac @defmac AC_LONG_64_BITS @acindex{LONG_64_BITS} @cvindex LONG_64_BITS Define @code{LONG_64_BITS} if the C type @code{long int} is 64 bits wide. Use the generic macro @samp{AC_CHECK_SIZEOF([long int])} instead (@pxref{AC_CHECK_SIZEOF}). @end defmac @defmac AC_LONG_DOUBLE @acindex{LONG_DOUBLE} If the C compiler supports a working @code{long double} type with more range or precision than the @code{double} type, define @code{HAVE_LONG_DOUBLE}. You should use @code{AC_TYPE_LONG_DOUBLE} or @code{AC_TYPE_LONG_DOUBLE_WIDER} instead. @xref{Particular Types}. @end defmac @defmac AC_LONG_FILE_NAMES @acindex{LONG_FILE_NAMES} Replaced by @example AC_SYS_LONG_FILE_NAMES @end example @noindent @xref{AC_SYS_LONG_FILE_NAMES}. @end defmac @defmac AC_MAJOR_HEADER @acindex{MAJOR_HEADER} Replaced by @code{AC_HEADER_MAJOR} (@pxref{AC_HEADER_MAJOR}). 
@end defmac @defmac AC_MEMORY_H @acindex{MEMORY_H} @cvindex NEED_MEMORY_H Used to define @code{NEED_MEMORY_H} if the @code{mem} functions were defined in @file{memory.h}. Today it is equivalent to @samp{AC_CHECK_HEADERS([memory.h])} (@pxref{AC_CHECK_HEADERS}). Adjust your code to get the @code{mem} functions from @file{string.h} instead. @end defmac @defmac AC_MINGW32 @acindex{MINGW32} Similar to @code{AC_CYGWIN} but checks for the MinGW compiler environment and sets @code{MINGW32}. Don't use this macro, the dignified means to check the nature of the host is using @code{AC_CANONICAL_HOST} (@pxref{Canonicalizing}). @end defmac @defmac AC_MINIX @acindex{MINIX} @cvindex _MINIX @cvindex _POSIX_SOURCE @cvindex _POSIX_1_SOURCE This macro is a platform-specific subset of @code{AC_USE_SYSTEM_EXTENSIONS} (@pxref{AC_USE_SYSTEM_EXTENSIONS}). @end defmac @defmac AC_MINUS_C_MINUS_O @acindex{MINUS_C_MINUS_O} Replaced by @code{AC_PROG_CC_C_O} (@pxref{AC_PROG_CC_C_O}). @end defmac @defmac AC_MMAP @acindex{MMAP} Replaced by @code{AC_FUNC_MMAP} (@pxref{AC_FUNC_MMAP}). @end defmac @defmac AC_MODE_T @acindex{MODE_T} Replaced by @code{AC_TYPE_MODE_T} (@pxref{AC_TYPE_MODE_T}). @end defmac @defmac AC_OBJEXT @acindex{OBJEXT} @ovindex OBJEXT Defined the output variable @code{OBJEXT} based on the output of the compiler, after .c files have been excluded. Typically set to @samp{o} if Posix, @samp{obj} if a DOS variant. Now the compiler checking macros handle this automatically. @end defmac @defmac AC_OBSOLETE (@var{this-macro-name}, @ovar{suggestion}) @acindex{OBSOLETE} Make M4 print a message to the standard error output warning that @var{this-macro-name} is obsolete, and giving the file and line number where it was called. @var{this-macro-name} should be the name of the macro that is calling @code{AC_OBSOLETE}. If @var{suggestion} is given, it is printed at the end of the warning message; for example, it can be a suggestion for what to use instead of @var{this-macro-name}. 
For instance @example AC_OBSOLETE([$0], [; use AC_CHECK_HEADERS(unistd.h) instead])dnl @end example @noindent You are encouraged to use @code{AU_DEFUN} instead, since it gives better services to the user (@pxref{AU_DEFUN}). @end defmac @defmac AC_OFF_T @acindex{OFF_T} Replaced by @code{AC_TYPE_OFF_T} (@pxref{AC_TYPE_OFF_T}). @end defmac @defmac AC_OUTPUT (@ovar{file}@dots{}, @ovar{extra-cmds}, @ovar{init-cmds}) @acindex{OUTPUT} The use of @code{AC_OUTPUT} with arguments is deprecated. This obsoleted interface is equivalent to: @example @group AC_CONFIG_FILES(@var{file}@dots{}) AC_CONFIG_COMMANDS([default], @var{extra-cmds}, @var{init-cmds}) AC_OUTPUT @end group @end example @noindent See @ref{AC_CONFIG_FILES}, @ref{AC_CONFIG_COMMANDS}, and @ref{AC_OUTPUT}. @end defmac @defmac AC_OUTPUT_COMMANDS (@var{extra-cmds}, @ovar{init-cmds}) @acindex{OUTPUT_COMMANDS} Specify additional shell commands to run at the end of @file{config.status}, and shell commands to initialize any variables from @command{configure}. This macro may be called multiple times. It is obsolete, replaced by @code{AC_CONFIG_COMMANDS} (@pxref{AC_CONFIG_COMMANDS}). Here is an unrealistic example: @example fubar=27 AC_OUTPUT_COMMANDS([echo this is extra $fubar, and so on.], [fubar=$fubar]) AC_OUTPUT_COMMANDS([echo this is another, extra, bit], [echo init bit]) @end example Aside from the fact that @code{AC_CONFIG_COMMANDS} requires an additional key, an important difference is that @code{AC_OUTPUT_COMMANDS} is quoting its arguments twice, unlike @code{AC_CONFIG_COMMANDS}. This means that @code{AC_CONFIG_COMMANDS} can safely be given macro calls as arguments: @example AC_CONFIG_COMMANDS(foo, [my_FOO()]) @end example @noindent Conversely, where one level of quoting was enough for literal strings with @code{AC_OUTPUT_COMMANDS}, you need two with @code{AC_CONFIG_COMMANDS}. 
The following lines are equivalent: @example @group AC_OUTPUT_COMMANDS([echo "Square brackets: []"]) AC_CONFIG_COMMANDS([default], [[echo "Square brackets: []"]]) @end group @end example @end defmac @defmac AC_PID_T @acindex{PID_T} Replaced by @code{AC_TYPE_PID_T} (@pxref{AC_TYPE_PID_T}). @end defmac @defmac AC_PREFIX @acindex{PREFIX} Replaced by @code{AC_PREFIX_PROGRAM} (@pxref{AC_PREFIX_PROGRAM}). @end defmac @defmac AC_PROG_CC_C89 @acindex{PROG_CC_C89} Now done by @code{AC_PROG_CC} (@pxref{AC_PROG_CC}). @end defmac @defmac AC_PROG_CC_C99 @acindex{PROG_CC_C99} Now done by @code{AC_PROG_CC} (@pxref{AC_PROG_CC}). @end defmac @defmac AC_PROG_CC_STDC @acindex{PROG_CC_STDC} Now done by @code{AC_PROG_CC} (@pxref{AC_PROG_CC}). @end defmac @defmac AC_PROGRAMS_CHECK @acindex{PROGRAMS_CHECK} Replaced by @code{AC_CHECK_PROGS} (@pxref{AC_CHECK_PROGS}). @end defmac @defmac AC_PROGRAMS_PATH @acindex{PROGRAMS_PATH} Replaced by @code{AC_PATH_PROGS} (@pxref{AC_PATH_PROGS}). @end defmac @defmac AC_PROGRAM_CHECK @acindex{PROGRAM_CHECK} Replaced by @code{AC_CHECK_PROG} (@pxref{AC_CHECK_PROG}). @end defmac @defmac AC_PROGRAM_EGREP @acindex{PROGRAM_EGREP} Replaced by @code{AC_EGREP_CPP} (@pxref{AC_EGREP_CPP}). @end defmac @defmac AC_PROGRAM_PATH @acindex{PROGRAM_PATH} Replaced by @code{AC_PATH_PROG} (@pxref{AC_PATH_PROG}). @end defmac @defmac AC_REMOTE_TAPE @acindex{REMOTE_TAPE} Removed because of limited usefulness. @end defmac @defmac AC_RESTARTABLE_SYSCALLS @acindex{RESTARTABLE_SYSCALLS} This macro was renamed @code{AC_SYS_RESTARTABLE_SYSCALLS}. However, these days portable programs should use @code{sigaction} with @code{SA_RESTART} if they want restartable system calls. They should not rely on @code{HAVE_RESTARTABLE_SYSCALLS}, since nowadays whether a system call is restartable is a dynamic issue, not a configuration-time issue. 
@end defmac @defmac AC_RETSIGTYPE @acindex{RETSIGTYPE} Replaced by @code{AC_TYPE_SIGNAL} (@pxref{AC_TYPE_SIGNAL}), which itself is obsolete when assuming C89 or better. @end defmac @defmac AC_RSH @acindex{RSH} Removed because of limited usefulness. @end defmac @defmac AC_SCO_INTL @acindex{SCO_INTL} @ovindex LIBS If on SCO Unix, add @option{-lintl} to output variable @code{LIBS}. This macro used to do this: @example AC_CHECK_LIB([intl], [strftime], [LIBS="-lintl $LIBS"]) @end example @noindent Now it just calls @code{AC_FUNC_STRFTIME} instead (@pxref{AC_FUNC_STRFTIME}). @end defmac @defmac AC_SETVBUF_REVERSED @acindex{SETVBUF_REVERSED} Replaced by @example AC_FUNC_SETVBUF_REVERSED @end example @noindent @xref{AC_FUNC_SETVBUF_REVERSED}. @end defmac @defmac AC_SET_MAKE @acindex{SET_MAKE} Replaced by @code{AC_PROG_MAKE_SET} (@pxref{AC_PROG_MAKE_SET}). @end defmac @defmac AC_SIZEOF_TYPE @acindex{SIZEOF_TYPE} Replaced by @code{AC_CHECK_SIZEOF} (@pxref{AC_CHECK_SIZEOF}). @end defmac @defmac AC_SIZE_T @acindex{SIZE_T} Replaced by @code{AC_TYPE_SIZE_T} (@pxref{AC_TYPE_SIZE_T}). @end defmac @defmac AC_STAT_MACROS_BROKEN @acindex{STAT_MACROS_BROKEN} Replaced by @code{AC_HEADER_STAT} (@pxref{AC_HEADER_STAT}). @end defmac @defmac AC_STDC_HEADERS @acindex{STDC_HEADERS} Replaced by @code{AC_HEADER_STDC} (@pxref{AC_HEADER_STDC}), which is itself obsolete. Nowadays it is safe to assume the facilities of C90 exist. @end defmac @defmac AC_STRCOLL @acindex{STRCOLL} Replaced by @code{AC_FUNC_STRCOLL} (@pxref{AC_FUNC_STRCOLL}). @end defmac @defmac AC_STRUCT_ST_BLKSIZE @acindex{STRUCT_ST_BLKSIZE} @cvindex HAVE_STRUCT_STAT_ST_BLKSIZE @cvindex HAVE_ST_BLKSIZE If @code{struct stat} contains an @code{st_blksize} member, define @code{HAVE_STRUCT_STAT_ST_BLKSIZE}. The former name, @code{HAVE_ST_BLKSIZE} is to be avoided, as its support will cease in the future. 
This macro is obsoleted, and should be replaced by @example AC_CHECK_MEMBERS([struct stat.st_blksize]) @end example @noindent @xref{AC_CHECK_MEMBERS}. @end defmac @defmac AC_STRUCT_ST_RDEV @acindex{STRUCT_ST_RDEV} @cvindex HAVE_ST_RDEV @cvindex HAVE_STRUCT_STAT_ST_RDEV If @code{struct stat} contains an @code{st_rdev} member, define @code{HAVE_STRUCT_STAT_ST_RDEV}. The former name for this macro, @code{HAVE_ST_RDEV}, is to be avoided as it will cease to be supported in the future. Actually, even the new macro is obsolete and should be replaced by: @example AC_CHECK_MEMBERS([struct stat.st_rdev]) @end example @noindent @xref{AC_CHECK_MEMBERS}. @end defmac @defmac AC_ST_BLKSIZE @acindex{ST_BLKSIZE} Replaced by @code{AC_CHECK_MEMBERS} (@pxref{AC_CHECK_MEMBERS}). @end defmac @defmac AC_ST_BLOCKS @acindex{ST_BLOCKS} Replaced by @code{AC_STRUCT_ST_BLOCKS} (@pxref{AC_STRUCT_ST_BLOCKS}). @end defmac @defmac AC_ST_RDEV @acindex{ST_RDEV} Replaced by @code{AC_CHECK_MEMBERS} (@pxref{AC_CHECK_MEMBERS}). @end defmac @defmac AC_SYS_RESTARTABLE_SYSCALLS @acindex{SYS_RESTARTABLE_SYSCALLS} @cvindex HAVE_RESTARTABLE_SYSCALLS If the system automatically restarts a system call that is interrupted by a signal, define @code{HAVE_RESTARTABLE_SYSCALLS}. This macro does not check whether system calls are restarted in general---it checks whether a signal handler installed with @code{signal} (but not @code{sigaction}) causes system calls to be restarted. It does not check whether system calls can be restarted when interrupted by signals that have no handler. These days portable programs should use @code{sigaction} with @code{SA_RESTART} if they want restartable system calls. They should not rely on @code{HAVE_RESTARTABLE_SYSCALLS}, since nowadays whether a system call is restartable is a dynamic issue, not a configuration-time issue. @end defmac @defmac AC_SYS_SIGLIST_DECLARED @acindex{SYS_SIGLIST_DECLARED} This macro was renamed @code{AC_DECL_SYS_SIGLIST}. 
However, even that name is obsolete, as the same functionality is now achieved via @code{AC_CHECK_DECLS} (@pxref{AC_CHECK_DECLS}). @end defmac @defmac AC_TEST_CPP @acindex{TEST_CPP} This macro was renamed @code{AC_TRY_CPP}, which in turn was replaced by @code{AC_PREPROC_IFELSE} (@pxref{AC_PREPROC_IFELSE}). @end defmac @defmac AC_TEST_PROGRAM @acindex{TEST_PROGRAM} This macro was renamed @code{AC_TRY_RUN}, which in turn was replaced by @code{AC_RUN_IFELSE} (@pxref{AC_RUN_IFELSE}). @end defmac @defmac AC_TIMEZONE @acindex{TIMEZONE} Replaced by @code{AC_STRUCT_TIMEZONE} (@pxref{AC_STRUCT_TIMEZONE}). @end defmac @defmac AC_TIME_WITH_SYS_TIME @acindex{TIME_WITH_SYS_TIME} Replaced by @code{AC_HEADER_TIME} (@pxref{AC_HEADER_TIME}), which is itself obsolete; nowadays one need only do @samp{AC_CHECK_HEADERS([sys/time.h])}. @end defmac @defmac AC_TRY_COMPILE (@var{includes}, @var{function-body}, @ @ovar{action-if-true}, @ovar{action-if-false}) @acindex{TRY_COMPILE} Same as: @example AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[@var{includes}]], [[@var{function-body}]])], [@var{action-if-true}], [@var{action-if-false}]) @end example @noindent @xref{Running the Compiler}. This macro double quotes both @var{includes} and @var{function-body}. For C and C++, @var{includes} is any @code{#include} statements needed by the code in @var{function-body} (@var{includes} is ignored if the currently selected language is Fortran or Fortran 77). The compiler and compilation flags are determined by the current language (@pxref{Language Choice}). @end defmac @defmac AC_TRY_CPP (@var{input}, @ovar{action-if-true}, @ovar{action-if-false}) @acindex{TRY_CPP} Same as: @example AC_PREPROC_IFELSE( [AC_LANG_SOURCE([[@var{input}]])], [@var{action-if-true}], [@var{action-if-false}]) @end example @noindent @xref{Running the Preprocessor}. This macro double quotes the @var{input}. 
@end defmac @defmac AC_TRY_LINK (@var{includes}, @var{function-body}, @ @ovar{action-if-true}, @ovar{action-if-false}) @acindex{TRY_LINK} Same as: @example AC_LINK_IFELSE( [AC_LANG_PROGRAM([[@var{includes}]], [[@var{function-body}]])], [@var{action-if-true}], [@var{action-if-false}]) @end example @noindent @xref{Running the Linker}. This macro double quotes both @var{includes} and @var{function-body}. Depending on the current language (@pxref{Language Choice}), create a test program to see whether a function whose body consists of @var{function-body} can be compiled and linked. If the file compiles and links successfully, run shell commands @var{action-if-true}, otherwise run @var{action-if-false}. For C and C++, @var{includes} is any @code{#include} statements needed by the code in @var{function-body} (@var{includes} is ignored if the currently selected language is Fortran or Fortran 77). The compiler and compilation flags are determined by the current language (@pxref{Language Choice}), and in addition @code{LDFLAGS} and @code{LIBS} are used for linking. @end defmac @defmac AC_TRY_LINK_FUNC (@var{function}, @ovar{action-if-found}, @ @ovar{action-if-not-found}) @acindex{TRY_LINK_FUNC} This macro is equivalent to @example AC_LINK_IFELSE([AC_LANG_CALL([], [@var{function}])], [@var{action-if-found}], [@var{action-if-not-found}]) @end example @noindent @xref{Running the Linker}. @end defmac @defmac AC_TRY_RUN (@var{program}, @ovar{action-if-true}, @ @ovar{action-if-false}, @dvar{action-if-cross-compiling, AC_MSG_FAILURE}) @acindex{TRY_RUN} Same as: @example AC_RUN_IFELSE( [AC_LANG_SOURCE([[@var{program}]])], [@var{action-if-true}], [@var{action-if-false}], [@var{action-if-cross-compiling}]) @end example @noindent @xref{Runtime}. 
@end defmac @anchor{AC_TYPE_SIGNAL} @defmac AC_TYPE_SIGNAL @acindex{TYPE_SIGNAL} @cvindex RETSIGTYPE @hdrindex{signal.h} If @file{signal.h} declares @code{signal} as returning a pointer to a function returning @code{void}, define @code{RETSIGTYPE} to be @code{void}; otherwise, define it to be @code{int}. These days, it is portable to assume C89, and that signal handlers return @code{void}, without needing to use this macro or @code{RETSIGTYPE}. When targeting older K&R C, it is possible to define signal handlers as returning type @code{RETSIGTYPE}, and omit a return statement: @example @group RETSIGTYPE hup_handler () @{ @dots{} @} @end group @end example @end defmac @defmac AC_UID_T @acindex{UID_T} Replaced by @code{AC_TYPE_UID_T} (@pxref{AC_TYPE_UID_T}). @end defmac @defmac AC_UNISTD_H @acindex{UNISTD_H} Same as @samp{AC_CHECK_HEADERS([unistd.h])} (@pxref{AC_CHECK_HEADERS}), which is one of the tests done as a side effect by @code{AC_INCLUDES_DEFAULT} (@pxref{Default Includes}), so usually unnecessary to write explicitly. @end defmac @defmac AC_USG @acindex{USG} @cvindex USG Define @code{USG} if the BSD string functions (@code{bcopy}, @code{bzero}, @code{index}, @code{rindex}, etc) are @emph{not} defined in @file{strings.h}. Modern code should assume @file{string.h} exists and should use the ISO C string functions (@code{memmove}, @code{memset}, @code{strchr}, @code{strrchr}, etc) unconditionally. @file{strings.h} may be the only header that declares @code{strcasecmp}, @code{strncasecmp}, and @code{ffs}. @code{AC_INCLUDES_DEFAULT} checks for it (@pxref{Default Includes}); test @code{HAVE_STRINGS_H}. @end defmac @defmac AC_UTIME_NULL @acindex{UTIME_NULL} Replaced by @code{AC_FUNC_UTIME_NULL} (@pxref{AC_FUNC_UTIME_NULL}). 
@end defmac @defmac AC_VALIDATE_CACHED_SYSTEM_TUPLE (@ovar{cmd}) @acindex{VALIDATE_CACHED_SYSTEM_TUPLE} If the cache file is inconsistent with the current host, target and build system types, it used to execute @var{cmd} or print a default error message. This is now handled by default. @end defmac @defmac AC_VERBOSE (@var{result-description}) @acindex{VERBOSE} Replaced by @code{AC_MSG_RESULT} (@pxref{AC_MSG_RESULT}). @end defmac @defmac AC_VFORK @acindex{VFORK} Replaced by @code{AC_FUNC_FORK} (@pxref{AC_FUNC_FORK}). @end defmac @defmac AC_VPRINTF @acindex{VPRINTF} Replaced by @code{AC_FUNC_VPRINTF} (@pxref{AC_FUNC_VPRINTF}). @end defmac @defmac AC_WAIT3 @acindex{WAIT3} This macro was renamed @code{AC_FUNC_WAIT3}. However, these days portable programs should use @code{waitpid}, not @code{wait3}, as @code{wait3} has been removed from Posix. @end defmac @defmac AC_WARN @acindex{WARN} Replaced by @code{AC_MSG_WARN} (@pxref{AC_MSG_WARN}). @end defmac @defmac AC_WARNING (@var{message}) @acindex{WARNING} Replaced by @code{m4_warn} (@pxref{m4_warn}). @end defmac @defmac AC_WITH (@var{package}, @var{action-if-given}, @ @ovar{action-if-not-given}) @acindex{WITH} This is an obsolete version of @code{AC_ARG_WITH} that does not support providing a help string (@pxref{AC_ARG_WITH}). @end defmac @defmac AC_WORDS_BIGENDIAN @acindex{WORDS_BIGENDIAN} Replaced by @code{AC_C_BIGENDIAN} (@pxref{AC_C_BIGENDIAN}). @end defmac @defmac AC_XENIX_DIR @acindex{XENIX_DIR} @ovindex LIBS This macro used to add @option{-lx} to output variable @code{LIBS} if on Xenix. Also, if @file{dirent.h} is being checked for, added @option{-ldir} to @code{LIBS}. 
Now it is merely an alias of @code{AC_HEADER_DIRENT} instead, plus some code to detect whether running XENIX on which you should not depend: @example AC_MSG_CHECKING([for Xenix]) AC_EGREP_CPP([yes], [#if defined M_XENIX && !defined M_UNIX yes #endif], [AC_MSG_RESULT([yes]); XENIX=yes], [AC_MSG_RESULT([no]); XENIX=]) @end example @noindent Don't use this macro, the dignified means to check the nature of the host is using @code{AC_CANONICAL_HOST} (@pxref{Canonicalizing}). @end defmac @defmac AC_YYTEXT_POINTER @acindex{YYTEXT_POINTER} This macro was renamed @code{AC_DECL_YYTEXT}, which in turn was integrated into @code{AC_PROG_LEX} (@pxref{AC_PROG_LEX}). @end defmac @node Autoconf 1 @section Upgrading From Version 1 @cindex Upgrading autoconf @cindex Autoconf upgrading Autoconf version 2 is mostly backward compatible with version 1. However, it introduces better ways to do some things, and doesn't support some of the ugly things in version 1. So, depending on how sophisticated your @file{configure.ac} files are, you might have to do some manual work in order to upgrade to version 2. This chapter points out some problems to watch for when upgrading. Also, perhaps your @command{configure} scripts could benefit from some of the new features in version 2; the changes are summarized in the file @file{NEWS} in the Autoconf distribution. @menu * Changed File Names:: Files you might rename * Changed Makefiles:: New things to put in @file{Makefile.in} * Changed Macros:: Macro calls you might replace * Changed Results:: Changes in how to check test results * Changed Macro Writing:: Better ways to write your own macros @end menu @node Changed File Names @subsection Changed File Names If you have an @file{aclocal.m4} installed with Autoconf (as opposed to in a particular package's source directory), you must rename it to @file{acsite.m4}. @xref{autoconf Invocation}. 
If you distribute @file{install.sh} with your package, rename it to @file{install-sh} so @command{make} builtin rules don't inadvertently create a file called @file{install} from it. @code{AC_PROG_INSTALL} looks for the script under both names, but it is best to use the new name. If you were using @file{config.h.top}, @file{config.h.bot}, or @file{acconfig.h}, you still can, but you have less clutter if you use the @code{AH_} macros. @xref{Autoheader Macros}. @node Changed Makefiles @subsection Changed Makefiles Add @samp{@@CFLAGS@@}, @samp{@@CPPFLAGS@@}, and @samp{@@LDFLAGS@@} in your @file{Makefile.in} files, so they can take advantage of the values of those variables in the environment when @command{configure} is run. Doing this isn't necessary, but it's a convenience for users. Also add @samp{@@configure_input@@} in a comment to each input file for @code{AC_OUTPUT}, so that the output files contain a comment saying they were produced by @command{configure}. Automatically selecting the right comment syntax for all the kinds of files that people call @code{AC_OUTPUT} on became too much work. Add @file{config.log} and @file{config.cache} to the list of files you remove in @code{distclean} targets. If you have the following in @file{Makefile.in}: @example prefix = /usr/local exec_prefix = $(prefix) @end example @noindent you must change it to: @example prefix = @@prefix@@ exec_prefix = @@exec_prefix@@ @end example @noindent The old behavior of replacing those variables without @samp{@@} characters around them has been removed. @node Changed Macros @subsection Changed Macros Many of the macros were renamed in Autoconf version 2. You can still use the old names, but the new ones are clearer, and it's easier to find the documentation for them. @xref{Obsolete Macros}, for a table showing the new names for the old macros. Use the @command{autoupdate} program to convert your @file{configure.ac} to using the new macro names. @xref{autoupdate Invocation}. 
Some macros have been superseded by similar ones that do the job better, but are not call-compatible. If you get warnings about calling obsolete macros while running @command{autoconf}, you may safely ignore them, but your @command{configure} script generally works better if you follow the advice that is printed about what to replace the obsolete macros with. In particular, the mechanism for reporting the results of tests has changed. If you were using @command{echo} or @code{AC_VERBOSE} (perhaps via @code{AC_COMPILE_CHECK}), your @command{configure} script's output looks better if you switch to @code{AC_MSG_CHECKING} and @code{AC_MSG_RESULT}. @xref{Printing Messages}. Those macros work best in conjunction with cache variables. @xref{Caching Results}. @node Changed Results @subsection Changed Results If you were checking the results of previous tests by examining the shell variable @code{DEFS}, you need to switch to checking the values of the cache variables for those tests. @code{DEFS} no longer exists while @command{configure} is running; it is only created when generating output files. This difference from version 1 is because properly quoting the contents of that variable turned out to be too cumbersome and inefficient to do every time @code{AC_DEFINE} is called. @xref{Cache Variable Names}. For example, here is a @file{configure.ac} fragment written for Autoconf version 1: @example AC_HAVE_FUNCS(syslog) case "$DEFS" in *-DHAVE_SYSLOG*) ;; *) # syslog is not in the default libraries. See if it's in some other. saved_LIBS="$LIBS" for lib in bsd socket inet; do AC_CHECKING(for syslog in -l$lib) LIBS="-l$lib $saved_LIBS" AC_HAVE_FUNCS(syslog) case "$DEFS" in *-DHAVE_SYSLOG*) break ;; *) ;; esac LIBS="$saved_LIBS" done ;; esac @end example Here is a way to write it for version 2: @example AC_CHECK_FUNCS([syslog]) if test "x$ac_cv_func_syslog" = xno; then # syslog is not in the default libraries. See if it's in some other. 
for lib in bsd socket inet; do AC_CHECK_LIB([$lib], [syslog], [AC_DEFINE([HAVE_SYSLOG]) LIBS="-l$lib $LIBS"; break]) done fi @end example If you were working around bugs in @code{AC_DEFINE_UNQUOTED} by adding backslashes before quotes, you need to remove them. It now works predictably, and does not treat quotes (except back quotes) specially. @xref{Setting Output Variables}. All of the Boolean shell variables set by Autoconf macros now use @samp{yes} for the true value. Most of them use @samp{no} for false, though for backward compatibility some use the empty string instead. If you were relying on a shell variable being set to something like 1 or @samp{t} for true, you need to change your tests. @node Changed Macro Writing @subsection Changed Macro Writing When defining your own macros, you should now use @code{AC_DEFUN} instead of @code{define}. @code{AC_DEFUN} automatically calls @code{AC_PROVIDE} and ensures that macros called via @code{AC_REQUIRE} do not interrupt other macros, to prevent nested @samp{checking@dots{}} messages on the screen. There's no actual harm in continuing to use the older way, but it's less convenient and attractive. @xref{Macro Definitions}. You probably looked at the macros that came with Autoconf as a guide for how to do things. It would be a good idea to take a look at the new versions of them, as the style is somewhat improved and they take advantage of some new features. If you were doing tricky things with undocumented Autoconf internals (macros, variables, diversions), check whether you need to change anything to account for changes that have been made. Perhaps you can even use an officially supported technique in version 2 instead of kludging. Or perhaps not. To speed up your locally written feature tests, add caching to them. See whether any of your tests are of general enough usefulness to encapsulate them into macros that you can share. 
@node Autoconf 2.13 @section Upgrading From Version 2.13 @cindex Upgrading autoconf @cindex Autoconf upgrading The introduction of the previous section (@pxref{Autoconf 1}) perfectly suits this section@enddots{} @quotation Autoconf version 2.50 is mostly backward compatible with version 2.13. However, it introduces better ways to do some things, and doesn't support some of the ugly things in version 2.13. So, depending on how sophisticated your @file{configure.ac} files are, you might have to do some manual work in order to upgrade to version 2.50. This chapter points out some problems to watch for when upgrading. Also, perhaps your @command{configure} scripts could benefit from some of the new features in version 2.50; the changes are summarized in the file @file{NEWS} in the Autoconf distribution. @end quotation @menu * Changed Quotation:: Broken code which used to work * New Macros:: Interaction with foreign macros * Hosts and Cross-Compilation:: Bugward compatibility kludges * AC_LIBOBJ vs LIBOBJS:: LIBOBJS is a forbidden token * AC_ACT_IFELSE vs AC_TRY_ACT:: A more generic scheme for testing sources @end menu @node Changed Quotation @subsection Changed Quotation The most important changes are invisible to you: the implementation of most macros has completely changed. This allowed more factorization of the code, better error messages, a higher uniformity of the user's interface, etc. Unfortunately, as a side effect, some construct which used to (miraculously) work might break starting with Autoconf 2.50. The most common culprit is bad quotation. 
For instance, in the following example, the message is not properly quoted: @example AC_INIT AC_CHECK_HEADERS(foo.h, , AC_MSG_ERROR(cannot find foo.h, bailing out)) AC_OUTPUT @end example @noindent Autoconf 2.13 simply ignores it: @example $ @kbd{autoconf-2.13; ./configure --silent} creating cache ./config.cache configure: error: cannot find foo.h $ @end example @noindent while Autoconf 2.50 produces a broken @file{configure}: @example $ @kbd{autoconf-2.50; ./configure --silent} configure: error: cannot find foo.h ./configure: exit: bad non-numeric arg `bailing' ./configure: exit: bad non-numeric arg `bailing' $ @end example The message needs to be quoted, and the @code{AC_MSG_ERROR} invocation too! @example AC_INIT([Example], [1.0], [bug-example@@example.org]) AC_CHECK_HEADERS([foo.h], [], [AC_MSG_ERROR([cannot find foo.h, bailing out])]) AC_OUTPUT @end example Many many (and many more) Autoconf macros were lacking proper quotation, including no less than@dots{} @code{AC_DEFUN} itself! @example $ @kbd{cat configure.in} AC_DEFUN([AC_PROG_INSTALL], [# My own much better version ]) AC_INIT AC_PROG_INSTALL AC_OUTPUT $ @kbd{autoconf-2.13} autoconf: Undefined macros: ***BUG in Autoconf--please report*** AC_FD_MSG ***BUG in Autoconf--please report*** AC_EPI configure.in:1:AC_DEFUN([AC_PROG_INSTALL], configure.in:5:AC_PROG_INSTALL $ @kbd{autoconf-2.50} $ @end example @node New Macros @subsection New Macros @cindex undefined macro @cindex @code{_m4_divert_diversion} While Autoconf was relatively dormant in the late 1990s, Automake provided Autoconf-like macros for a while. Starting with Autoconf 2.50 in 2001, Autoconf provided versions of these macros, integrated in the @code{AC_} namespace, instead of @code{AM_}. But in order to ease the upgrading via @command{autoupdate}, bindings to such @code{AM_} macros are provided. Unfortunately older versions of Automake (e.g., Automake 1.4) did not quote the names of these macros. 
Therefore, when @command{m4} finds something like @samp{AC_DEFUN(AM_TYPE_PTRDIFF_T, @dots{})} in @file{aclocal.m4}, @code{AM_TYPE_PTRDIFF_T} is expanded, replaced with its Autoconf definition. Fortunately Autoconf catches pre-@code{AC_INIT} expansions, and complains, in its own words: @example $ @kbd{cat configure.ac} AC_INIT([Example], [1.0], [bug-example@@example.org]) AM_TYPE_PTRDIFF_T $ @kbd{aclocal-1.4} $ @kbd{autoconf} aclocal.m4:17: error: m4_defn: undefined macro: _m4_divert_diversion aclocal.m4:17: the top level autom4te: m4 failed with exit status: 1 $ @end example Modern versions of Automake no longer define most of these macros, and properly quote the names of the remaining macros. If you must use an old Automake, do not depend upon macros from Automake as it is simply not its job to provide macros (but the one it requires itself): @example $ @kbd{cat configure.ac} AC_INIT([Example], [1.0], [bug-example@@example.org]) AM_TYPE_PTRDIFF_T $ @kbd{rm aclocal.m4} $ @kbd{autoupdate} autoupdate: `configure.ac' is updated $ @kbd{cat configure.ac} AC_INIT([Example], [1.0], [bug-example@@example.org]) AC_CHECK_TYPES([ptrdiff_t]) $ @kbd{aclocal-1.4} $ @kbd{autoconf} $ @end example @node Hosts and Cross-Compilation @subsection Hosts and Cross-Compilation @cindex Cross compilation Based on the experience of compiler writers, and after long public debates, many aspects of the cross-compilation chain have changed: @itemize @minus @item the relationship between the build, host, and target architecture types, @item the command line interface for specifying them to @command{configure}, @item the variables defined in @command{configure}, @item the enabling of cross-compilation mode. @end itemize @sp 1 The relationship between build, host, and target have been cleaned up: the chain of default is now simply: target defaults to host, host to build, and build to the result of @command{config.guess}. 
Nevertheless, in order to ease the transition from 2.13 to 2.50, the following transition scheme is implemented. @emph{Do not rely on it}, as it will be completely disabled in a couple of releases (we cannot keep it, as it proves to cause more problems than it cures). They all default to the result of running @command{config.guess}, unless you specify either @option{--build} or @option{--host}. In this case, the default becomes the system type you specified. If you specify both, and they're different, @command{configure} enters cross compilation mode, so it doesn't run any tests that require execution. Hint: if you mean to override the result of @command{config.guess}, prefer @option{--build} over @option{--host}. @sp 1 For backward compatibility, @command{configure} accepts a system type as an option by itself. Such an option overrides the defaults for build, host, and target system types. The following configure statement configures a cross toolchain that runs on NetBSD/alpha but generates code for GNU Hurd/sparc, which is also the build platform. @example ./configure --host=alpha-netbsd sparc-gnu @end example @sp 1 In Autoconf 2.13 and before, the variables @code{build}, @code{host}, and @code{target} had a different semantics before and after the invocation of @code{AC_CANONICAL_BUILD} etc. Now, the argument of @option{--build} is strictly copied into @code{build_alias}, and is left empty otherwise. After the @code{AC_CANONICAL_BUILD}, @code{build} is set to the canonicalized build type. To ease the transition, before, its contents is the same as that of @code{build_alias}. Do @emph{not} rely on this broken feature. For consistency with the backward compatibility scheme exposed above, when @option{--host} is specified but @option{--build} isn't, the build system is assumed to be the same as @option{--host}, and @samp{build_alias} is set to that value. Eventually, this historically incorrect behavior will go away. 
@sp 1 The former scheme to enable cross-compilation proved to cause more harm than good, in particular, it used to be triggered too easily, leaving regular end users puzzled in front of cryptic error messages. @command{configure} could even enter cross-compilation mode only because the compiler was not functional. This is mainly because @command{configure} used to try to detect cross-compilation, instead of waiting for an explicit flag from the user. Now, @command{configure} enters cross-compilation mode if and only if @option{--host} is passed. That's the short documentation. To ease the transition between 2.13 and its successors, a more complicated scheme is implemented. @emph{Do not rely on the following}, as it will be removed in the near future. If you specify @option{--host}, but not @option{--build}, when @command{configure} performs the first compiler test it tries to run an executable produced by the compiler. If the execution fails, it enters cross-compilation mode. This is fragile. Moreover, by the time the compiler test is performed, it may be too late to modify the build-system type: other tests may have already been performed. Therefore, whenever you specify @option{--host}, be sure to specify @option{--build} too. @example ./configure --build=x86_64-pc-linux-gnu --host=x86_64-w64-mingw64 @end example @noindent enters cross-compilation mode. The former interface, which consisted in setting the compiler to a cross-compiler without informing @command{configure} is obsolete. For instance, @command{configure} fails if it can't run the code generated by the specified compiler if you configure as follows: @example ./configure CC=x86_64-w64-mingw64-gcc @end example @node AC_LIBOBJ vs LIBOBJS @subsection @code{AC_LIBOBJ} vs.@: @code{LIBOBJS} Up to Autoconf 2.13, the replacement of functions was triggered via the variable @code{LIBOBJS}. Since Autoconf 2.50, the macro @code{AC_LIBOBJ} should be used instead (@pxref{Generic Functions}). 
Starting at Autoconf 2.53, the use of @code{LIBOBJS} is an error. This change is mandated by the unification of the GNU Build System components. In particular, the various fragile techniques used to parse a @file{configure.ac} are all replaced with the use of traces. As a consequence, any action must be traceable, which obsoletes critical variable assignments. Fortunately, @code{LIBOBJS} was the only problem, and it can even be handled gracefully (read, ``without your having to change something''). There were two typical uses of @code{LIBOBJS}: asking for a replacement function, and adjusting @code{LIBOBJS} for Automake and/or Libtool. @sp 1 As for function replacement, the fix is immediate: use @code{AC_LIBOBJ}. For instance: @example LIBOBJS="$LIBOBJS fnmatch.o" LIBOBJS="$LIBOBJS malloc.$ac_objext" @end example @noindent should be replaced with: @example AC_LIBOBJ([fnmatch]) AC_LIBOBJ([malloc]) @end example @sp 1 @ovindex LIBOBJDIR When used with Automake 1.10 or newer, a suitable value for @code{LIBOBJDIR} is set so that the @code{LIBOBJS} and @code{LTLIBOBJS} can be referenced from any @file{Makefile.am}. Even without Automake, arranging for @code{LIBOBJDIR} to be set correctly enables referencing @code{LIBOBJS} and @code{LTLIBOBJS} in another directory. The @code{LIBOBJDIR} feature is experimental. @node AC_ACT_IFELSE vs AC_TRY_ACT @subsection @code{AC_@var{ACT}_IFELSE} vs.@: @code{AC_TRY_@var{ACT}} @c the anchor keeps the old node name, to try to avoid breaking links @anchor{AC_FOO_IFELSE vs AC_TRY_FOO} @acindex{@var{ACT}_IFELSE} @acindex{TRY_@var{ACT}} Since Autoconf 2.50, internal codes uses @code{AC_PREPROC_IFELSE}, @code{AC_COMPILE_IFELSE}, @code{AC_LINK_IFELSE}, and @code{AC_RUN_IFELSE} on one hand and @code{AC_LANG_SOURCE}, and @code{AC_LANG_PROGRAM} on the other hand instead of the deprecated @code{AC_TRY_CPP}, @code{AC_TRY_COMPILE}, @code{AC_TRY_LINK}, and @code{AC_TRY_RUN}. 
The motivations were: @itemize @minus @item a more consistent interface: @code{AC_TRY_COMPILE} etc.@: were double quoting their arguments; @item the combinatorial explosion is solved by decomposing on the one hand the generation of sources, and on the other hand executing the program; @item this scheme helps support more languages than plain C and C++. @end itemize In addition to the change of syntax, the philosophy has changed too: while emphasis was put on speed at the expense of accuracy, today's Autoconf promotes accuracy of the testing framework at, ahem@dots{}, the expense of speed. As a perfect example of what is @emph{not} to be done, here is how to find out whether a header file contains a particular declaration, such as a typedef, a structure, a structure member, or a function. Use @code{AC_EGREP_HEADER} instead of running @code{grep} directly on the header file; on some systems the symbol might be defined in another header file that the file you are checking includes. As a (bad) example, here is how you should not check for C preprocessor symbols, either defined by header files or predefined by the C preprocessor: using @code{AC_EGREP_CPP}: @example @group AC_EGREP_CPP(yes, [#ifdef _AIX yes #endif ], is_aix=yes, is_aix=no) @end group @end example The above example, properly written, would (i) use @code{AC_LANG_PROGRAM}, and (ii) run the compiler: @example @group AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[#ifndef _AIX error: This isn't AIX! #endif ]])], [is_aix=yes], [is_aix=no]) @end group @end example @c ============================= Generating Test Suites with Autotest @node Using Autotest @chapter Generating Test Suites with Autotest @cindex Autotest @display @strong{N.B.: This section describes a feature which is still stabilizing. 
Although we believe that Autotest is useful as-is, this documentation describes an interface which might change in the future: do not depend upon Autotest without subscribing to the Autoconf mailing lists.} @end display It is paradoxical that portable projects depend on nonportable tools to run their test suite. Autoconf by itself is the paragon of this problem: although it aims at perfect portability, up to 2.13 its test suite was using DejaGNU, a rich and complex testing framework which is far from being standard on Posix systems. Worse yet, it was likely to be missing on the most fragile platforms, the very platforms that are most likely to torture Autoconf and exhibit deficiencies. To circumvent this problem, many package maintainers have developed their own testing framework, based on simple shell scripts whose sole outputs are exit status values describing whether the test succeeded. Most of these tests share common patterns, and this can result in lots of duplicated code and tedious maintenance. Following exactly the same reasoning that led to the inception of Autoconf, Autotest provides a test suite generation framework, based on M4 macros building a portable shell script. The suite itself is equipped with automatic logging and tracing facilities which greatly diminish the interaction with bug reporters, and simple timing reports. Autoconf itself has been using Autotest for years, and we do attest that it has considerably improved the strength of the test suite and the quality of bug reports. Other projects are known to use some generation of Autotest, such as Bison, GNU Wdiff, GNU Tar, each of them with different needs, and this usage has validated Autotest as a general testing framework. Nonetheless, compared to DejaGNU, Autotest is inadequate for interactive tool testing, which is probably its main limitation. 
@menu * Using an Autotest Test Suite:: Autotest and the user * Writing Testsuites:: Autotest macros * testsuite Invocation:: Running @command{testsuite} scripts * Making testsuite Scripts:: Using autom4te to create @command{testsuite} @end menu @node Using an Autotest Test Suite @section Using an Autotest Test Suite @menu * testsuite Scripts:: The concepts of Autotest * Autotest Logs:: Their contents @end menu @node testsuite Scripts @subsection @command{testsuite} Scripts @cindex @command{testsuite} Generating testing or validation suites using Autotest is rather easy. The whole validation suite is held in a file to be processed through @command{autom4te}, itself using GNU M4 under the hood, to produce a stand-alone Bourne shell script which then gets distributed. Neither @command{autom4te} nor GNU M4 are needed at the installer's end. @cindex test group Each test of the validation suite should be part of some test group. A @dfn{test group} is a sequence of interwoven tests that ought to be executed together, usually because one test in the group creates data files that a later test in the same group needs to read. Complex test groups make later debugging more tedious. It is much better to keep only a few tests per test group. Ideally there is only one test per test group. For all but the simplest packages, some file such as @file{testsuite.at} does not fully hold all test sources, as these are often easier to maintain in separate files. Each of these separate files holds a single test group, or a sequence of test groups all addressing some common functionality in the package. In such cases, @file{testsuite.at} merely initializes the validation suite, and sometimes does elementary health checking, before listing include statements for all other test files. The special file @file{package.m4}, containing the identification of the package, is automatically included if found. 
A convenient alternative consists in moving all the global issues (local Autotest macros, elementary health checking, and @code{AT_INIT} invocation) into the file @code{local.at}, and making @file{testsuite.at} be a simple list of @code{m4_include}s of sub test suites. In such case, generating the whole test suite or pieces of it is only a matter of choosing the @command{autom4te} command line arguments. The validation scripts that Autotest produces are by convention called @command{testsuite}. When run, @command{testsuite} executes each test group in turn, producing only one summary line per test to say if that particular test succeeded or failed. At end of all tests, summarizing counters get printed. One debugging directory is left for each test group which failed, if any: such directories are named @file{testsuite.dir/@var{nn}}, where @var{nn} is the sequence number of the test group, and they include: @itemize @bullet @item a debugging script named @file{run} which reruns the test in @dfn{debug mode} (@pxref{testsuite Invocation}). The automatic generation of debugging scripts has the purpose of easing the chase for bugs. @item all the files created with @code{AT_DATA} @item all the Erlang source code files created with @code{AT_CHECK_EUNIT} @item a log of the run, named @file{testsuite.log} @end itemize In the ideal situation, none of the tests fail, and consequently no debugging directory is left behind for validation. It often happens in practice that individual tests in the validation suite need to get information coming out of the configuration process. Some of this information, common for all validation suites, is provided through the file @file{atconfig}, automatically created by @code{AC_CONFIG_TESTDIR}. For configuration information which your testing environment specifically needs, you might prepare an optional file named @file{atlocal.in}, instantiated by @code{AC_CONFIG_FILES}. 
The configuration process produces @file{atconfig} and @file{atlocal} out of these two input files, and these two produced files are automatically read by the @file{testsuite} script. Here is a diagram showing the relationship between files. @noindent Files used in preparing a software package for distribution: @example [package.m4] -->. \ subfile-1.at ->. [local.at] ---->+ ... \ \ subfile-i.at ---->-- testsuite.at -->-- autom4te* -->testsuite ... / subfile-n.at ->' @end example @noindent Files used in configuring a software package: @example .--> atconfig / [atlocal.in] --> config.status* --< \ `--> [atlocal] @end example @noindent Files created during test suite execution: @example atconfig -->. .--> testsuite.log \ / >-- testsuite* --< / \ [atlocal] ->' `--> [testsuite.dir] @end example @node Autotest Logs @subsection Autotest Logs When run, the test suite creates a log file named after itself, e.g., a test suite named @command{testsuite} creates @file{testsuite.log}. It contains a lot of information, usually more than maintainers actually need, but therefore most of the time it contains all that is needed: @table @asis @item command line arguments A bad but unfortunately widespread habit consists of setting environment variables before the command, such as in @samp{CC=my-home-grown-cc ./testsuite}. The test suite does not know this change, hence (i) it cannot report it to you, and (ii) it cannot preserve the value of @code{CC} for subsequent runs. Autoconf faced exactly the same problem, and solved it by asking users to pass the variable definitions as command line arguments. Autotest requires this rule, too, but has no means to enforce it; the log then contains a trace of the variables that were changed by the user. @item @file{ChangeLog} excerpts The topmost lines of all the @file{ChangeLog} files found in the source hierarchy. 
This is especially useful when bugs are reported against development versions of the package, since the version string does not provide sufficient information to know the exact state of the sources the user compiled. Of course, this relies on the use of a @file{ChangeLog}. @item build machine Running a test suite in a cross-compile environment is not an easy task, since it would mean having the test suite run on a machine @var{build}, while running programs on a machine @var{host}. It is much simpler to run both the test suite and the programs on @var{host}, but then, from the point of view of the test suite, there remains a single environment, @var{host} = @var{build}. The log contains relevant information on the state of the @var{build} machine, including some important environment variables. @c FIXME: How about having an M4sh macro to say `hey, log the value @c of `@dots{}'? This would help both Autoconf and Autotest. @item tested programs The absolute file name and answers to @option{--version} of the tested programs (see @ref{Writing Testsuites}, @code{AT_TESTED}). @item configuration log The contents of @file{config.log}, as created by @command{configure}, are appended. It contains the configuration flags and a detailed report on the configuration itself. @end table @node Writing Testsuites @section Writing @file{testsuite.at} The @file{testsuite.at} is a Bourne shell script making use of special Autotest M4 macros. It often contains a call to @code{AT_INIT} near its beginning followed by one call to @code{m4_include} per source file for tests. Each such included file, or the remainder of @file{testsuite.at} if include files are not used, contain a sequence of test groups. Each test group begins with a call to @code{AT_SETUP}, then an arbitrary number of shell commands or calls to @code{AT_CHECK}, and then completes with a call to @code{AT_CLEANUP}. Multiple test groups can be categorized by a call to @code{AT_BANNER}. 
All of the public Autotest macros have all-uppercase names in the namespace @samp{^AT_} to prevent them from accidentally conflicting with other text; Autoconf also reserves the namespace @samp{^_AT_} for internal macros. All shell variables used in the testsuite for internal purposes have mostly-lowercase names starting with @samp{at_}. Autotest also uses here-document delimiters in the namespace @samp{^_AT[A-Z]}, and makes use of the file system namespace @samp{^at-}. Since Autoconf is built on top of M4sugar (@pxref{Programming in M4sugar}) and M4sh (@pxref{Programming in M4sh}), you must also be aware of those namespaces (@samp{^_?\(m4\|AS\)_}). In general, you @emph{should not use} the namespace of a package that does not own the macro or shell code you are writing. @defmac AT_INIT (@ovar{name}) @atindex{INIT} @c FIXME: Not clear, plus duplication of the information. Initialize Autotest. Giving a @var{name} to the test suite is encouraged if your package includes several test suites. Before this macro is called, @code{AT_PACKAGE_STRING} and @code{AT_PACKAGE_BUGREPORT} must be defined, which are used to display information about the testsuite to the user. Typically, these macros are provided by a file @file{package.m4} built by @command{make} (@pxref{Making testsuite Scripts}), in order to inherit the package name, version, and bug reporting address from @file{configure.ac}. @end defmac @defmac AT_COPYRIGHT (@var{copyright-notice}) @atindex{COPYRIGHT} @cindex Copyright Notice State that, in addition to the Free Software Foundation's copyright on the Autotest macros, parts of your test suite are covered by @var{copyright-notice}. The @var{copyright-notice} shows up in both the head of @command{testsuite} and in @samp{testsuite --version}. 
@end defmac @defmac AT_ARG_OPTION (@var{options}, @var{help-text}, @ @ovar{action-if-given}, @ovar{action-if-not-given}) @atindex{ARG_OPTION} @vrindex at_arg_@var{option} Accept options from the space-separated list @var{options}, a list that has leading dashes removed from the options. Long options will be prefixed with @samp{--}, single-character options with @samp{-}. The first word in this list is the primary @var{option}, any others are assumed to be short-hand aliases. The variable associated with it is @code{at_arg_@var{option}}, with any dashes in @var{option} replaced with underscores. If the user passes @option{--@var{option}} to the @command{testsuite}, the variable will be set to @samp{:}. If the user does not pass the option, or passes @option{--no-@var{option}}, then the variable will be set to @samp{false}. @vrindex at_optarg @vrindex at_optarg_@var{option} @var{action-if-given} is run each time the option is encountered; here, the variable @code{at_optarg} will be set to @samp{:} or @samp{false} as appropriate. @code{at_optarg} is actually just a copy of @code{at_arg_@var{option}}. @var{action-if-not-given} will be run once after option parsing is complete and if no option from @var{options} was used. @var{help-text} is added to the end of the list of options shown in @command{testsuite --help} (@pxref{AS_HELP_STRING}). It is recommended that you use a package-specific prefix to @var{options} names in order to avoid clashes with future Autotest built-in options. @end defmac @defmac AT_ARG_OPTION_ARG (@var{options}, @var{help-text}, @ @ovar{action-if-given}, @ovar{action-if-not-given}) @atindex{ARG_OPTION_ARG} @vrindex at_arg_@var{option} Accept options with arguments from the space-separated list @var{options}, a list that has leading dashes removed from the options. Long options will be prefixed with @samp{--}, single-character options with @samp{-}. 
The first word in this list is the primary @var{option}, any others are assumed to be short-hand aliases. The variable associated with it is @code{at_arg_@var{option}}, with any dashes in @var{option} replaced with underscores. If the user passes @option{--@var{option}=@var{arg}} or @option{--@var{option} @var{arg}} to the @command{testsuite}, the variable will be set to @samp{@var{arg}}. @vrindex at_optarg @var{action-if-given} is run each time the option is encountered; here, the variable @code{at_optarg} will be set to @samp{@var{arg}}. @code{at_optarg} is actually just a copy of @code{at_arg_@var{option}}. @var{action-if-not-given} will be run once after option parsing is complete and if no option from @var{options} was used. @var{help-text} is added to the end of the list of options shown in @command{testsuite --help} (@pxref{AS_HELP_STRING}). It is recommended that you use a package-specific prefix to @var{options} names in order to avoid clashes with future Autotest built-in options. @end defmac @defmac AT_COLOR_TESTS @atindex{COLOR_TESTS} Enable colored test results by default when the output is connected to a terminal. @end defmac @defmac AT_TESTED (@var{executables}) @atindex{TESTED} Log the file name and answer to @option{--version} of each program in space-separated list @var{executables}. Several invocations register new executables, in other words, don't fear registering one program several times. Autotest test suites rely on @env{PATH} to find the tested program. This avoids the need to generate absolute names of the various tools, and makes it possible to test installed programs. Therefore, knowing which programs are being exercised is crucial to understanding problems in the test suite itself, or its occasional misuses. It is a good idea to also subscribe foreign programs you depend upon, to avoid incompatible diagnostics. 
@var{executables} is implicitly wrapped in shell double quotes, but it will still use shell variable expansion (@samp{$}), command substitution (@samp{`}), and backslash escaping (@samp{\}). In particular, the @env{EXEEXT} variable is available if it is passed to the testsuite via @file{atlocal} or @file{atconfig}. @end defmac @defmac AT_PREPARE_TESTS (@var{shell-code}) @atindex{PREPARE_TESTS} Execute @var{shell-code} in the main testsuite process, after initializing the test suite and processing command-line options, but before running any tests. If this macro is used several times, all of the @var{shell-code}s will be executed, in the order they appeared in @file{testsuite.at}. One reason to use @code{AT_PREPARE_TESTS} is when the programs under test are sensitive to environment variables: you can unset all these variables or reset them to safe values in @var{shell-code}. @var{shell-code} is only executed if at least one test is going to be run. In particular, it will not be executed if any of the @option{--help}, @option{--version}, @option{--list}, or @option{--clean} options are given to @command{testsuite} (@pxref{testsuite Invocation}). @end defmac @defmac AT_PREPARE_EACH_TEST (@var{shell-code}) @atindex{AT_PREPARE_EACH_TEST} Execute @var{shell-code} in each test group's subshell, at the point of the @code{AT_SETUP} that starts the test group. @end defmac @defmac AT_TEST_HELPER_FN (@var{name}, @var{args}, @var{description}, @var{code}) Define a shell function that will be available to the code for each test group. Its name will be @code{ath_fn_@var{name}}, and its body will be @var{code}. (The prefix prevents name conflicts with shell functions defined by M4sh and Autotest.) @var{args} should describe the function's arguments and @var{description} what it does; these are used only for documentation comments in the generated testsuite script. 
@end defmac @sp 1 @defmac AT_BANNER (@var{test-category-name}) @atindex{BANNER} This macro identifies the start of a category of related test groups. When the resulting @file{testsuite} is invoked with more than one test group to run, its output will include a banner containing @var{test-category-name} prior to any tests run from that category. The banner should be no more than about 40 or 50 characters. A blank banner indicates uncategorized tests; an empty line will be inserted after tests from an earlier category, effectively ending that category. @end defmac @defmac AT_SETUP (@var{test-group-name}) @atindex{SETUP} This macro starts a group of related tests, all to be executed in the same subshell. It accepts a single argument, which holds a few words (no more than about 30 or 40 characters) quickly describing the purpose of the test group being started. @var{test-group-name} must not expand to unbalanced quotes, although quadrigraphs can be used. @end defmac @defmac AT_KEYWORDS (@var{keywords}) @atindex{KEYWORDS} Associate the space-separated list of @var{keywords} to the enclosing test group. This makes it possible to run ``slices'' of the test suite. For instance, if some of your test groups exercise some @samp{foo} feature, then using @samp{AT_KEYWORDS(foo)} lets you run @samp{./testsuite -k foo} to run exclusively these test groups. The @var{test-group-name} of the test group is automatically recorded to @code{AT_KEYWORDS}. Several invocations within a test group accumulate new keywords. In other words, don't fear registering the same keyword several times in a test group. @end defmac @defmac AT_CAPTURE_FILE (@var{file}) @atindex{CAPTURE_FILE} If the current test group fails, log the contents of @var{file}. Several identical calls within one test group have no additional effect. @end defmac @defmac AT_FAIL_IF (@var{shell-condition}) @atindex{FAIL_IF} Make the test group fail and skip the rest of its execution, if @var{shell-condition} is true. 
@var{shell-condition} is a shell expression such as a @code{test} command. Tests before @command{AT_FAIL_IF} will be executed and may still cause the test group to be skipped. You can instantiate this macro many times from within the same test group. You should use this macro only for very simple failure conditions. If the @var{shell-condition} could emit any kind of output you should instead use @command{AT_CHECK} like @example AT_CHECK([if @var{shell-condition}; then exit 99; fi]) @end example @noindent so that such output is properly recorded in the @file{testsuite.log} file. @end defmac @defmac AT_SKIP_IF (@var{shell-condition}) @atindex{SKIP_IF} Determine whether the test should be skipped because it requires features that are unsupported on the machine under test. @var{shell-condition} is a shell expression such as a @code{test} command. Tests before @command{AT_SKIP_IF} will be executed and may still cause the test group to fail. You can instantiate this macro many times from within the same test group. You should use this macro only for very simple skip conditions. If the @var{shell-condition} could emit any kind of output you should instead use @command{AT_CHECK} like @example AT_CHECK([if @var{shell-condition}; then exit 77; fi]) @end example @noindent so that such output is properly recorded in the @file{testsuite.log} file. @end defmac @defmac AT_XFAIL_IF (@var{shell-condition}) @atindex{XFAIL_IF} Determine whether the test is expected to fail because it is a known bug (for unsupported features, you should skip the test). @var{shell-condition} is a shell expression such as a @code{test} command; you can instantiate this macro many times from within the same test group, and one of the conditions is enough to turn the test into an expected failure. @end defmac @defmac AT_CLEANUP @atindex{CLEANUP} End the current test group. 
@end defmac @sp 1 @defmac AT_DATA (@var{file}, @var{contents}) @defmacx AT_DATA_UNQUOTED (@var{file}, @var{contents}) @atindex{DATA} Initialize an input data @var{file} with given @var{contents}. Of course, the @var{contents} have to be properly quoted between square brackets to protect against included commas or spurious M4 expansion. @var{contents} must be empty or end with a newline. @var{file} must be a single shell word that expands into a single file name. The difference between @code{AT_DATA} and @code{AT_DATA_UNQUOTED} is that only the latter performs shell variable expansion (@samp{$}), command substitution (@samp{`}), and backslash escaping (@samp{\}) on @var{contents}. @end defmac @defmac AT_CHECK (@var{commands}, @dvar{status, 0}, @ovar{stdout}, @ @ovar{stderr}, @ovar{run-if-fail}, @ovar{run-if-pass}) @defmacx AT_CHECK_UNQUOTED (@var{commands}, @dvar{status, 0}, @ovar{stdout}, @ @ovar{stderr}, @ovar{run-if-fail}, @ovar{run-if-pass}) @atindex{CHECK} @atindex{CHECK_UNQUOTED} @vrindex at_status Perform a test, by running the shell @var{commands} in a subshell. @var{commands} is output as-is, so shell expansions are honored. These commands are expected to have a final exit status of @var{status}, and to produce output as described by @var{stdout} and @var{stderr} (see below). This macro must be invoked in between @code{AT_SETUP} and @code{AT_CLEANUP}. If @var{commands} exit with unexpected status 77, then the rest of the test group is skipped. If @var{commands} exit with unexpected status 99, then the test group is immediately failed; this is called a @emph{hard failure}. Otherwise, the test is considered to have succeeded if all of the status, stdout, and stderr expectations were met. If @var{run-if-fail} is nonempty, it provides extra shell commands to run when the test fails; if @var{run-if-pass} is nonempty, it provides extra shell commands to run when the test succeeds. 
These commands are @emph{not} run in a subshell, and they are not run when the test group is skipped (exit code 77) or hard-failed (exit code 99). They may change whether the test group is considered to have succeeded, by modifying the shell variable @code{at_failed}; set it to @code{:} to indicate that the test group has failed, or @code{false} to indicate that it has succeeded. The exit status of @var{commands} is available to @var{run-if-fail} and @var{run-if-pass} commands in the @code{at_status} shell variable. The output from @var{commands} is also available, in the files named by the @code{at_stdout} and @code{at_stderr} variables. If @var{status} is the literal @samp{ignore}, then the exit status of @var{commands} is not checked, except for the special cases of 77 (skip) and 99 (hard failure). The existence of hard failures allows one to mark a test as an expected failure with @code{AT_XFAIL_IF} because a feature has not yet been implemented, but to still distinguish between gracefully handling the missing feature and dumping core. If the value of the @var{stdout} or @var{stderr} parameter is one of the literals in the following table, then the test treats the output according to the rules of that literal. @table @samp @item ignore The content of the output is ignored, but still captured in the test group log (if the testsuite is run with the @option{-v} option, the test group log is displayed as the test is run; if the test group later fails, the test group log is also copied into the overall testsuite log). This action is valid for both @var{stdout} and @var{stderr}. @item ignore-nolog The content of the output is ignored, and nothing is captured in the log files. 
If @var{commands} are likely to produce binary output (including long lines) or large amounts of output, then logging the output can make it harder to locate details related to subsequent tests within the group, and could potentially corrupt terminal display of a user running @command{testsuite -v}. This action is valid for both @var{stdout} and @var{stderr}. @item stdout Only valid as the @var{stdout} parameter. Capture the content of standard output in both a file named @file{stdout} and the test group log. Subsequent commands in the test group can then post-process the file. This action is often used when it is desired to use @command{grep} to look for a substring in the output, or when the output must be post-processed to normalize error messages into a common form. @item stderr Only valid as the @var{stderr} parameter. Capture the content of standard error in both a file named @file{stderr} and the test group log. @item stdout-nolog @itemx stderr-nolog Like @samp{stdout} or @samp{stderr}, except that the captured output is not duplicated into the test group log. This action is particularly useful for an intermediate check that produces large amounts of data, which will be followed by another check that filters down to the relevant data, as it makes it easier to locate details in the log. @item expout Only valid as the @var{stdout} parameter. Compare standard output with the previously created file @file{expout}, and list any differences in the testsuite log. @item experr Only valid as the @var{stderr} parameter. Compare standard error with the previously created file @file{experr}, and list any differences in the testsuite log. 
@end table Otherwise, the values of the @var{stdout} and @var{stderr} parameters are treated as text that must exactly match the output given by @var{commands} on standard output and standard error (including an empty parameter for no output); any differences are captured in the testsuite log and the test is failed (unless an unexpected exit status of 77 skipped the test instead). @code{AT_CHECK_UNQUOTED} performs shell variable expansion (@samp{$}), command substitution (@samp{`}), and backslash escaping (@samp{\}) on comparison text given in the @var{stdout} and @var{stderr} parameters; @code{AT_CHECK} does not. There is no difference in the interpretation of @var{commands}. @end defmac @defmac AT_CHECK_EUNIT (@var{module}, @var{test-spec}, @ovar{erlflags}, @ @ovar{run-if-fail}, @ovar{run-if-pass}) @atindex{CHECK_EUNIT} Initialize and execute an Erlang module named @var{module} that performs tests following the @var{test-spec} EUnit test specification. @var{test-spec} must be a valid EUnit test specification, as defined in the @uref{https://@/erlang.org/@/doc/@/apps/@/eunit/@/index.html, EUnit Reference Manual}. @var{erlflags} are optional command-line options passed to the Erlang interpreter to execute the test Erlang module. Typically, @var{erlflags} defines at least the paths to directories containing the compiled Erlang modules under test, as @samp{-pa path1 path2 ...}. For example, the unit tests associated with Erlang module @samp{testme}, which compiled code is in subdirectory @file{src}, can be performed with: @example AT_CHECK_EUNIT([testme_testsuite], [@{module, testme@}], [-pa "$@{abs_top_builddir@}/src"]) @end example This macro must be invoked in between @code{AT_SETUP} and @code{AT_CLEANUP}. Variables @code{ERL}, @code{ERLC}, and (optionally) @code{ERLCFLAGS} must be defined as the path of the Erlang interpreter, the path of the Erlang compiler, and the command-line flags to pass to the compiler, respectively. 
Those variables should be configured in @file{configure.ac} using the @command{AC_ERLANG_PATH_ERL} and @command{AC_ERLANG_PATH_ERLC} macros, and the configured values of those variables are automatically defined in the testsuite. If @code{ERL} or @code{ERLC} is not defined, the test group is skipped. If the EUnit library cannot be found, i.e. if module @code{eunit} cannot be loaded, the test group is skipped. Otherwise, if @var{test-spec} is an invalid EUnit test specification, the test group fails. Otherwise, if the EUnit test passes, shell commands @var{run-if-pass} are executed or, if the EUnit test fails, shell commands @var{run-if-fail} are executed and the test group fails. Only the generated test Erlang module is automatically compiled and executed. If @var{test-spec} involves testing other Erlang modules, e.g. module @samp{testme} in the example above, those modules must be already compiled. If the testsuite is run in verbose mode and with the @option{--verbose} option, EUnit is also run in verbose mode to output more details about individual unit tests. @end defmac @node testsuite Invocation @section Running @command{testsuite} Scripts @cindex @command{testsuite} Autotest test suites support the following options: @table @option @item --help @itemx -h Display the list of options and exit successfully. @item --version @itemx -V Display the version of the test suite and exit successfully. @item --directory=@var{dir} @itemx -C @var{dir} Change the current directory to @var{dir} before creating any files. Useful for running the testsuite in a subdirectory from a top-level Makefile. @item --jobs@r{[}=@var{n}@r{]} @itemx -j@ovar{n} Run @var{n} tests in parallel, if possible. If @var{n} is not given, run all given tests in parallel. Note that there should be no space before the argument to @option{-j}, as @option{-j @var{number}} denotes the separate arguments @option{-j} and @option{@var{number}}, see below. 
In parallel mode, the standard input device of the testsuite script is not available to commands inside a test group. Furthermore, banner lines are not printed, and the summary line for each test group is output after the test group completes. Summary lines may appear unordered. If verbose and trace output are enabled (see below), they may appear intermixed from concurrently running tests. Parallel mode requires the @command{mkfifo} command to work, and will be silently disabled otherwise. @item --clean @itemx -c Remove all the files the test suite might have created and exit. Meant for @code{clean} Make targets. @item --list @itemx -l List all the tests (or only the selection), including their possible keywords. @end table @sp 1 By default all tests are performed (or described with @option{--list}) silently in the default environment, but the environment, set of tests, and verbosity level can be tuned: @table @samp @item @var{variable}=@var{value} Set the environment @var{variable} to @var{value}. Use this rather than @samp{FOO=foo ./testsuite} as debugging scripts would then run in a different environment. @cindex @code{AUTOTEST_PATH} The variable @code{AUTOTEST_PATH} specifies the testing path to prepend to @env{PATH}. Relative directory names (not starting with @samp{/}) are considered to be relative to the top level of the package being built. All directories are made absolute, first starting from the top level @emph{build} tree, then from the @emph{source} tree. For instance @samp{./testsuite AUTOTEST_PATH=tests:bin} for a @file{/src/foo-1.0} source package built in @file{/tmp/foo} results in @samp{/tmp/foo/tests:/tmp/foo/bin} and then @samp{/src/foo-1.0/tests:/src/foo-1.0/bin} being prepended to @env{PATH}. @item @var{number} @itemx @var{number}-@var{number} @itemx @var{number}- @itemx -@var{number} Add the corresponding test groups, with obvious semantics, to the selection. 
@item --keywords=@var{keywords} @itemx -k @var{keywords} Add to the selection the test groups with title or keywords (arguments to @code{AT_SETUP} or @code{AT_KEYWORDS}) that match @emph{all} keywords of the comma separated list @var{keywords}, case-insensitively. Use @samp{!} immediately before the keyword to invert the selection for this keyword. By default, the keywords match whole words; enclose them in @samp{.*} to also match parts of words. For example, running @example @kbd{./testsuite -k 'autoupdate,.*FUNC.*'} @end example @noindent selects all tests tagged @samp{autoupdate} @emph{and} with tags containing @samp{FUNC} (as in @samp{AC_CHECK_FUNC}, @samp{AC_FUNC_ALLOCA}, etc.), while @example @kbd{./testsuite -k '!autoupdate' -k '.*FUNC.*'} @end example @noindent selects all tests not tagged @samp{autoupdate} @emph{or} with tags containing @samp{FUNC}. @item --errexit @itemx -e If any test fails, immediately abort testing. This implies @option{--debug}: post test group clean up, and top-level logging are inhibited. This option is meant for the full test suite, it is not really useful for generated debugging scripts. If the testsuite is run in parallel mode using @option{--jobs}, then concurrently running tests will finish before exiting. @item --verbose @itemx -v Force more verbosity in the detailed output of what is being done. This is the default for debugging scripts. @item --color @itemx --color@r{[}=never@r{|}auto@r{|}always@r{]} Enable colored test results. Without an argument, or with @samp{always}, test results will be colored. With @samp{never}, color mode is turned off. Otherwise, if either the macro @code{AT_COLOR_TESTS} is used by the testsuite author, or the argument @samp{auto} is given, then test results are colored if standard output is connected to a terminal. 
@item --debug @itemx -d Do not remove the files after a test group was performed---but they are still removed @emph{before}, therefore using this option is sane when running several test groups. Create debugging scripts. Do not overwrite the top-level log (in order to preserve a supposedly existing full log file). This is the default for debugging scripts, but it can also be useful to debug the testsuite itself. @item --recheck Add to the selection all test groups that failed or passed unexpectedly during the last non-debugging test run. @item --trace @itemx -x Trigger shell tracing of the test groups. @end table Besides these options accepted by every Autotest testsuite, the testsuite author might have added package-specific options via the @code{AT_ARG_OPTION} and @code{AT_ARG_OPTION_ARG} macros (@pxref{Writing Testsuites}); refer to @command{testsuite --help} and the package documentation for details. @node Making testsuite Scripts @section Making @command{testsuite} Scripts For putting Autotest into movement, you need some configuration and makefile machinery. We recommend, at least if your package uses deep or shallow hierarchies, that you use @file{tests/} as the name of the directory holding all your tests and their makefile. Here is a check list of things to do, followed by an example, taking into consideration whether you are also using Automake. @itemize @minus @item @cindex @file{package.m4} @atindex{PACKAGE_STRING} @atindex{PACKAGE_BUGREPORT} @atindex{PACKAGE_NAME} @atindex{PACKAGE_TARNAME} @atindex{PACKAGE_VERSION} @atindex{PACKAGE_URL} Make sure to create the file @file{package.m4}, which defines the identity of the package. It must define @code{AT_PACKAGE_STRING}, the full signature of the package, and @code{AT_PACKAGE_BUGREPORT}, the address to which bug reports should be sent. For sake of completeness, we suggest that you also define @code{AT_PACKAGE_NAME}, @code{AT_PACKAGE_TARNAME}, @code{AT_PACKAGE_VERSION}, and @code{AT_PACKAGE_URL}. 
@xref{Initializing configure}, for a description of these variables. Be sure to distribute @file{package.m4} and to put it into the source hierarchy: the test suite ought to be shipped! See below for an example. @item Invoke @code{AC_CONFIG_TESTDIR} in your @file{configure.ac}. @defmac AC_CONFIG_TESTDIR (@var{directory}, @dvarv{test-path, directory}) @acindex{CONFIG_TESTDIR} An Autotest test suite is to be configured in @var{directory}. This macro causes @file{@var{directory}/atconfig} to be created by @command{config.status} and sets the default @code{AUTOTEST_PATH} to @var{test-path} (@pxref{testsuite Invocation}). @end defmac @item Still within @file{configure.ac}, as appropriate, ensure that some @code{AC_CONFIG_FILES} command includes substitution for @file{tests/atlocal}. @item Also within your @file{configure.ac}, arrange for the @code{AUTOM4TE} variable to be set. @item The appropriate @file{Makefile} should be modified so the validation in your package is triggered by @samp{make check}. @end itemize The following example demonstrates the above checklist, first by assuming that you are using Automake (see below for tweaks to make to get the same results without Automake). Begin by adding the following lines to your @file{configure.ac}: @example # Initialize the test suite. AC_CONFIG_TESTDIR([tests]) AC_CONFIG_FILES([tests/Makefile tests/atlocal]) AM_MISSING_PROG([AUTOM4TE], [autom4te]) @end example Next, add the following lines to your @file{tests/Makefile.am}, in order to link @samp{make check} with a validation suite. @example # The ':;' works around a Bash 3.2 bug when the output is not writable. $(srcdir)/package.m4: $(top_srcdir)/configure.ac :;@{ \ echo '# Signature of the current package.' 
&& \ echo 'm4_define([AT_PACKAGE_NAME],' && \ echo ' [$(PACKAGE_NAME)])' && \ echo 'm4_define([AT_PACKAGE_TARNAME],' && \ echo ' [$(PACKAGE_TARNAME)])' && \ echo 'm4_define([AT_PACKAGE_VERSION],' && \ echo ' [$(PACKAGE_VERSION)])' && \ echo 'm4_define([AT_PACKAGE_STRING],' && \ echo ' [$(PACKAGE_STRING)])' && \ echo 'm4_define([AT_PACKAGE_BUGREPORT],' && \ echo ' [$(PACKAGE_BUGREPORT)])' && \ echo 'm4_define([AT_PACKAGE_URL],' && \ echo ' [$(PACKAGE_URL)])'; \ @} >'$(srcdir)/package.m4' EXTRA_DIST = testsuite.at $(srcdir)/package.m4 $(TESTSUITE) atlocal.in TESTSUITE = $(srcdir)/testsuite check-local: atconfig atlocal $(TESTSUITE) $(SHELL) '$(TESTSUITE)' $(TESTSUITEFLAGS) installcheck-local: atconfig atlocal $(TESTSUITE) $(SHELL) '$(TESTSUITE)' AUTOTEST_PATH='$(bindir)' \ $(TESTSUITEFLAGS) clean-local: test ! -f '$(TESTSUITE)' || \ $(SHELL) '$(TESTSUITE)' --clean AUTOTEST = $(AUTOM4TE) --language=autotest $(TESTSUITE): $(srcdir)/testsuite.at $(srcdir)/package.m4 $(AUTOTEST) -I '$(srcdir)' -o $@@.tmp $@@.at mv $@@.tmp $@@ @end example Note that the built testsuite is distributed; this is necessary because users might not have Autoconf installed, and thus would not be able to rebuild it. Likewise, the use of Automake's @code{AM_MISSING_PROG} will arrange for the definition of @code{$AUTOM4TE} within the Makefile to provide the user with a nicer error message if they modify a source file of the testsuite, and accidentally trigger the rebuild rules. You might want to list explicitly the dependencies, i.e., the list of the files @file{testsuite.at} includes. If you don't use Automake, you should make the following tweaks. In your @file{configure.ac}, replace the @code{AM_MISSING_PROG} line above with @code{AC_PATH_PROG([AUTOM4TE], [autom4te], [false])}.
You are welcome to also try using the @command{missing} script from the Automake project instead of @command{false}, to try to get a nicer error message when the user modifies prerequisites but did not have Autoconf installed, but at that point you may be better off using Automake. Then, take the code suggested above for @file{tests/@/Makefile.am} and place it in your @file{tests/@/Makefile.in} instead. Add code to your @file{tests/@/Makefile.in} to ensure that @code{$(EXTRA_DIST)} files are distributed, as well as adding the following additional lines to prepare the set of needed Makefile variables: @example subdir = tests PACKAGE_NAME = @@PACKAGE_NAME@@ PACKAGE_TARNAME = @@PACKAGE_TARNAME@@ PACKAGE_VERSION = @@PACKAGE_VERSION@@ PACKAGE_STRING = @@PACKAGE_STRING@@ PACKAGE_BUGREPORT = @@PACKAGE_BUGREPORT@@ PACKAGE_URL = @@PACKAGE_URL@@ AUTOM4TE = @@AUTOM4TE@@ atconfig: $(top_builddir)/config.status cd $(top_builddir) && \ $(SHELL) ./config.status $(subdir)/$@@ atlocal: $(srcdir)/atlocal.in $(top_builddir)/config.status cd $(top_builddir) && \ $(SHELL) ./config.status $(subdir)/$@@ @end example Using the above example (with or without Automake), and assuming you were careful to not initialize @samp{TESTSUITEFLAGS} within your makefile, you can now fine-tune test suite execution at runtime by altering this variable, for example: @example make check TESTSUITEFLAGS='-v -d -x 75 -k AC_PROG_CC CFLAGS=-g' @end example @c =============================== Frequent Autoconf Questions, with answers @node FAQ @chapter Frequent Autoconf Questions, with answers Several questions about Autoconf come up occasionally. Here some of them are addressed. @menu * Distributing:: Distributing @command{configure} scripts * Why GNU M4:: Why not use the standard M4? * Bootstrapping:: Autoconf and GNU M4 require each other? * Why Not Imake:: Why GNU uses @command{configure} instead of Imake * Defining Directories:: Passing @code{datadir} to program * Autom4te Cache:: What is it? 
Can I remove it? * Present But Cannot Be Compiled:: Compiler and Preprocessor Disagree * Expanded Before Required:: Expanded Before Required * Debugging:: Debugging @command{configure} scripts @end menu @node Distributing @section Distributing @command{configure} Scripts @cindex License @display What are the restrictions on distributing @command{configure} scripts that Autoconf generates? How does that affect my programs that use them? @end display There are no restrictions on how the configuration scripts that Autoconf produces may be distributed or used. In Autoconf version 1, they were covered by the GNU General Public License. We still encourage software authors to distribute their work under terms like those of the GPL, but doing so is not required to use Autoconf. Of the other files that might be used with @command{configure}, @file{config.h.in} is under whatever copyright you use for your @file{configure.ac}. @file{config.sub} and @file{config.guess} have an exception to the GPL when they are used with an Autoconf-generated @command{configure} script, which permits you to distribute them under the same terms as the rest of your package. @file{install-sh} is from the X Consortium and is not copyrighted. @node Why GNU M4 @section Why Require GNU M4? @display Why does Autoconf require GNU M4? @end display Many M4 implementations have hard-coded limitations on the size and number of macros that Autoconf exceeds. They also lack several builtin macros that it would be difficult to get along without in a sophisticated application like Autoconf, including: @example m4_builtin m4_indir m4_bpatsubst __file__ __line__ @end example Autoconf requires version 1.4.6 or later of GNU M4. Since only software maintainers need to use Autoconf, and since GNU M4 is simple to configure and install, it seems reasonable to require GNU M4 to be installed also. Many maintainers of GNU and other free software already have most of the GNU utilities installed, since they prefer them. 
@node Bootstrapping @section How Can I Bootstrap? @cindex Bootstrap @display If Autoconf requires GNU M4 and GNU M4 has an Autoconf @command{configure} script, how do I bootstrap? It seems like a chicken and egg problem! @end display This is a misunderstanding. Although GNU M4 does come with a @command{configure} script produced by Autoconf, Autoconf is not required in order to run the script and install GNU M4. Autoconf is only required if you want to change the M4 @command{configure} script, which few people have to do (mainly its maintainer). @node Why Not Imake @section Why Not Imake? @cindex Imake @display Why not use Imake instead of @command{configure} scripts? @end display Several people have written addressing this question, so adaptations of their explanations are included here. The following answer is based on one written by Richard Pixley: @quotation Autoconf generated scripts frequently work on machines that it has never been set up to handle before. That is, it does a good job of inferring a configuration for a new system. Imake cannot do this. Imake uses a common database of host specific data. For X11, this makes sense because the distribution is made as a collection of tools, by one central authority who has control over the database. GNU tools are not released this way. Each GNU tool has a maintainer; these maintainers are scattered across the world. Using a common database would be a maintenance nightmare. Autoconf may appear to be this kind of database, but in fact it is not. Instead of listing host dependencies, it lists program requirements. If you view the GNU suite as a collection of native tools, then the problems are similar. But the GNU development tools can be configured as cross tools in almost any host+target permutation. All of these configurations can be installed concurrently. They can even be configured to share host independent files across hosts. Imake doesn't address these issues. Imake templates are a form of standardization. 
The GNU coding standards address the same issues without necessarily imposing the same restrictions. @end quotation Here is some further explanation, written by Per Bothner: @quotation One of the advantages of Imake is that it is easy to generate large makefiles using the @samp{#include} and macro mechanisms of @command{cpp}. However, @code{cpp} is not programmable: it has limited conditional facilities, and no looping. And @code{cpp} cannot inspect its environment. All of these problems are solved by using @code{sh} instead of @code{cpp}. The shell is fully programmable, has macro substitution, can execute (or source) other shell scripts, and can inspect its environment. @end quotation Paul Eggert elaborates more: @quotation With Autoconf, installers need not assume that Imake itself is already installed and working well. This may not seem like much of an advantage to people who are accustomed to Imake. But on many hosts Imake is not installed or the default installation is not working well, and requiring Imake to install a package hinders the acceptance of that package on those hosts. For example, the Imake template and configuration files might not be installed properly on a host, or the Imake build procedure might wrongly assume that all source files are in one big directory tree, or the Imake configuration might assume one compiler whereas the package or the installer needs to use another, or there might be a version mismatch between the Imake expected by the package and the Imake supported by the host. These problems are much rarer with Autoconf, where each package comes with its own independent configuration processor. Also, Imake often suffers from unexpected interactions between @command{make} and the installer's C preprocessor. The fundamental problem here is that the C preprocessor was designed to preprocess C programs, not makefiles. 
This is much less of a problem with Autoconf, which uses the general-purpose preprocessor M4, and where the package's author (rather than the installer) does the preprocessing in a standard way. @end quotation Finally, Mark Eichin notes: @quotation Imake isn't all that extensible, either. In order to add new features to Imake, you need to provide your own project template, and duplicate most of the features of the existing one. This means that for a sophisticated project, using the vendor-provided Imake templates fails to provide any leverage---since they don't cover anything that your own project needs (unless it is an X11 program). On the other side, though: The one advantage that Imake has over @command{configure}: @file{Imakefile} files tend to be much shorter (likewise, less redundant) than @file{Makefile.in} files. There is a fix to this, however---at least for the Kerberos V5 tree, we've modified things to call in common @file{post.in} and @file{pre.in} makefile fragments for the entire tree. This means that a lot of common things don't have to be duplicated, even though they normally are in @command{configure} setups. @end quotation @node Defining Directories @section How Do I @code{#define} Installation Directories? @display My program needs library files, installed in @code{datadir} and similar. If I use @example AC_DEFINE_UNQUOTED([DATADIR], [$datadir], [Define to the read-only architecture-independent data directory.]) @end example @noindent I get @example #define DATADIR "$@{prefix@}/share" @end example @end display As already explained, this behavior is on purpose, mandated by the GNU Coding Standards, see @ref{Installation Directory Variables}. There are several means to achieve a similar goal: @itemize @minus @item Do not use @code{AC_DEFINE} but use your makefile to pass the actual value of @code{datadir} via compilation flags. @xref{Installation Directory Variables}, for the details. 
@item This solution can be simplified when compiling a program: you may either extend the @code{CPPFLAGS}: @example CPPFLAGS = -DDATADIR='"$(datadir)"' @@CPPFLAGS@@ @end example @noindent If you are using Automake, you should use @code{AM_CPPFLAGS} instead: @example AM_CPPFLAGS = -DDATADIR='"$(datadir)"' @end example @noindent Alternatively, create a dedicated header file: @example DISTCLEANFILES = myprog-paths.h myprog-paths.h: Makefile echo '#define DATADIR "$(datadir)"' >$@@ @end example @noindent The Gnulib module @samp{configmake} provides such a header with all the standard directory variables defined, @pxref{configmake,,, gnulib, GNU Gnulib}. @item Use @code{AC_DEFINE} but have @command{configure} compute the literal value of @code{datadir} and others. Many people have wrapped macros to automate this task; for an example, see the macro @code{AC_DEFINE_DIR} from the @uref{https://@/www.gnu.org/@/software/@/autoconf-archive/, Autoconf Macro Archive}. This solution does not conform to the GNU Coding Standards. @item Note that all the previous solutions hard wire the absolute name of these directories in the executables, which is not a good property. You may try to compute the names relative to @code{prefix}, and try to find @code{prefix} at runtime, this way your package is relocatable. @end itemize @node Autom4te Cache @section What is @file{autom4te.cache}? @display What is this directory @file{autom4te.cache}? Can I safely remove it? @end display In the GNU Build System, @file{configure.ac} plays a central role and is read by many tools: @command{autoconf} to create @file{configure}, @command{autoheader} to create @file{config.h.in}, @command{automake} to create @file{Makefile.in}, @command{autoscan} to check the completeness of @file{configure.ac}, @command{autoreconf} to check the GNU Build System components that are used. To ``read @file{configure.ac}'' actually means to compile it with M4, which can be a long process for complex @file{configure.ac}. 
This is why all these tools, instead of running directly M4, invoke @command{autom4te} (@pxref{autom4te Invocation}) which, while answering to a specific demand, stores additional information in @file{autom4te.cache} for future runs. For instance, if you run @command{autoconf}, behind the scenes, @command{autom4te} also stores information for the other tools, so that when you invoke @command{autoheader} or @command{automake} etc., reprocessing @file{configure.ac} is not needed. The speed up is frequently 30%, and is increasing with the size of @file{configure.ac}. But it is and remains being simply a cache: you can safely remove it. @sp 1 @display Can I permanently get rid of it? @end display The creation of this cache can be disabled from @file{~/.autom4te.cfg}, see @ref{Customizing autom4te}, for more details. You should be aware that disabling the cache slows down the Autoconf test suite by 40%. The more GNU Build System components are used, the more the cache is useful; for instance running @samp{autoreconf -f} on the Core Utilities is twice slower without the cache @emph{although @option{--force} implies that the cache is not fully exploited}, and eight times slower than without @option{--force}. @node Present But Cannot Be Compiled @section Header Present But Cannot Be Compiled The most important guideline to bear in mind when checking for features is to mimic as much as possible the intended use. Unfortunately, old versions of @code{AC_CHECK_HEADER} and @code{AC_CHECK_HEADERS} failed to follow this idea, and called the preprocessor, instead of the compiler, to check for headers. As a result, incompatibilities between headers went unnoticed during configuration, and maintainers finally had to deal with this issue elsewhere. The transition began with Autoconf 2.56. As of Autoconf 2.64 both checks are performed, and @command{configure} complains loudly if the compiler and the preprocessor do not agree. However, only the compiler result is considered. 
As of Autoconf 2.70, only the compiler check is performed. Consider the following example: @smallexample $ @kbd{cat number.h} typedef int number; $ @kbd{cat pi.h} const number pi = 3; $ @kbd{cat configure.ac} AC_INIT([Example], [1.0], [bug-example@@example.org]) AC_CHECK_HEADERS([pi.h]) $ @kbd{autoconf -Wall} $ @kbd{./configure CPPFLAGS='-I.'} checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking for sys/types.h... yes checking for sys/stat.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking for pi.h... no @end smallexample @noindent The proper way to handle this case is using the fourth argument (@pxref{Generic Headers}): @example $ @kbd{cat configure.ac} AC_INIT([Example], [1.0], [bug-example@@example.org]) AC_CHECK_HEADERS([number.h pi.h], [], [], [[#ifdef HAVE_NUMBER_H # include <number.h> #endif ]]) $ @kbd{autoconf -Wall} $ @kbd{./configure CPPFLAGS='-I.'} checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking for number.h... yes checking for pi.h... yes @end example See @ref{Particular Headers}, for a list of headers with their prerequisites. 
@node Expanded Before Required @section Expanded Before Required @cindex expanded before required Older versions of Autoconf silently built files with incorrect ordering between dependent macros if an outer macro first expanded, then later indirectly required, an inner macro. Starting with Autoconf 2.64, this situation no longer generates out-of-order code, but results in duplicate output and a syntax warning: @example $ @kbd{cat configure.ac} @result{}AC_DEFUN([TESTA], [[echo in A @result{}if test -n "$SEEN_A" ; then echo duplicate ; fi @result{}SEEN_A=:]]) @result{}AC_DEFUN([TESTB], [AC_REQUIRE([TESTA])[echo in B @result{}if test -z "$SEEN_A" ; then echo bug ; fi]]) @result{}AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) @result{}AC_DEFUN([OUTER], [[echo in OUTER] @result{}TESTA @result{}TESTC]) @result{}AC_INIT @result{}OUTER @result{}AC_OUTPUT $ @kbd{autoconf} @result{}configure.ac:11: warning: AC_REQUIRE: @result{} `TESTA' was expanded before it was required @result{}configure.ac:4: TESTB is expanded from... @result{}configure.ac:6: TESTC is expanded from... @result{}configure.ac:7: OUTER is expanded from... @result{}configure.ac:11: the top level @end example @noindent To avoid this warning, decide what purpose the macro in question serves. If it only needs to be expanded once (for example, if it provides initialization text used by later macros), then the simplest fix is to change the macro to be declared with @code{AC_DEFUN_ONCE} (@pxref{One-Shot Macros}), although this only works in Autoconf 2.64 and newer. A more portable fix is to change all instances of direct calls to instead go through @code{AC_REQUIRE} (@pxref{Prerequisite Macros}). If, instead, the macro is parameterized by arguments or by the current definition of other macros in the m4 environment, then the macro should always be directly expanded instead of required. For another case study, consider this example trimmed down from an actual package. 
Originally, the package contained shell code and multiple macro invocations at the top level of @file{configure.ac}: @example AC_DEFUN([FOO], [AC_COMPILE_IFELSE([@dots{}])]) foobar= AC_PROG_CC FOO @end example @noindent but that was getting complex, so the author wanted to offload some of the text into a new macro in another file included via @file{aclocal.m4}. The na@"ive approach merely wraps the text in a new macro: @example AC_DEFUN([FOO], [AC_COMPILE_IFELSE([@dots{}])]) AC_DEFUN([BAR], [ foobar= AC_PROG_CC FOO ]) BAR @end example @noindent With older versions of Autoconf, the setting of @samp{foobar=} occurs before the single compiler check, as the author intended. But with Autoconf 2.64, this issues the ``expanded before it was required'' warning for @code{AC_PROG_CC}, and outputs two copies of the compiler check, one before @samp{foobar=}, and one after. To understand why this is happening, remember that the use of @code{AC_COMPILE_IFELSE} includes a call to @code{AC_REQUIRE([AC_PROG_CC])} under the hood. According to the documented semantics of @code{AC_REQUIRE}, this means that @code{AC_PROG_CC} @emph{must} occur before the body of the outermost @code{AC_DEFUN}, which in this case is @code{BAR}, thus preceding the use of @samp{foobar=}. The older versions of Autoconf were broken with regards to the rules of @code{AC_REQUIRE}, which explains why the code changed from one over to two copies of @code{AC_PROG_CC} when upgrading autoconf. In other words, the author was unknowingly relying on a bug exploit to get the desired results, and that exploit broke once the bug was fixed. So, what recourse does the author have, to restore their intended semantics of setting @samp{foobar=} prior to a single compiler check, regardless of whether Autoconf 2.63 or 2.64 is used? 
One idea is to remember that only @code{AC_DEFUN} is impacted by @code{AC_REQUIRE}; there is always the possibility of using the lower-level @code{m4_define}: @example AC_DEFUN([FOO], [AC_COMPILE_IFELSE([@dots{}])]) m4_define([BAR], [ foobar= AC_PROG_CC FOO ]) BAR @end example @noindent This works great if everything is in the same file. However, it does not help in the case where the author wants to have @command{aclocal} find the definition of @code{BAR} from its own file, since @command{aclocal} requires the use of @code{AC_DEFUN}. In this case, a better fix is to recognize that if @code{BAR} also uses @code{AC_REQUIRE}, then there will no longer be direct expansion prior to a subsequent require. Then, by creating yet another helper macro, the author can once again guarantee a single invocation of @code{AC_PROG_CC}, which will still occur after @code{foobar=}. The author can also use @code{AC_BEFORE} to make sure no other macro appearing before @code{BAR} has triggered an unwanted expansion of @code{AC_PROG_CC}. @example AC_DEFUN([FOO], [AC_COMPILE_IFELSE([@dots{}])]) AC_DEFUN([BEFORE_CC], [ foobar= ]) AC_DEFUN([BAR], [ AC_BEFORE([$0], [AC_PROG_CC])dnl AC_REQUIRE([BEFORE_CC])dnl AC_REQUIRE([AC_PROG_CC])dnl FOO ]) BAR @end example @node Debugging @section Debugging @command{configure} scripts While in general, @command{configure} scripts generated by Autoconf strive to be fairly portable to various systems, compilers, shells, and other tools, it may still be necessary to debug a failing test, broken script or makefile, or fix or override an incomplete, faulty, or erroneous test, especially during macro development. Failures can occur at all levels, in M4 syntax or semantics, shell script issues, or due to bugs in the test or the tools invoked by @command{configure}. Together with the rather arcane error message that @command{m4} and @command{make} may produce when their input contains syntax errors, this can make debugging rather painful. 
Nevertheless, here is a list of hints and strategies that may help: @itemize @item When @command{autoconf} fails, common causes for error include: @itemize @item mismatched or unbalanced parentheses or braces (@pxref{Balancing Parentheses}), @item under- or over-quoted macro arguments (@pxref{Autoconf Language}, @pxref{Quoting and Parameters}, @pxref{Quotation and Nested Macros}), @item spaces between macro name and opening parenthesis (@pxref{Autoconf Language}). @end itemize Typically, it helps to go back to the last working version of the input and compare the differences for each of these errors. Another possibility is to sprinkle pairs of @code{m4_traceon} and @code{m4_traceoff} judiciously in the code, either without a parameter or listing some macro names and watch @command{m4} expand its input verbosely (@pxref{Debugging via autom4te}). @item Sometimes @command{autoconf} succeeds but the generated @command{configure} script has invalid shell syntax. You can detect this case by running @samp{bash -n configure} or @samp{sh -n configure}. If this command fails, the same tips apply, as if @command{autoconf} had failed. @item Debugging @command{configure} script execution may be done by sprinkling pairs of @code{set -x} and @code{set +x} into the shell script before and after the region that contains a bug. Running the whole script with @samp{@var{shell} -vx ./configure 2>&1 | tee @var{log-file}} with a decent @var{shell} may work, but produces lots of output. Here, it can help to search for markers like @samp{checking for} a particular test in the @var{log-file}. @item Alternatively, you might use a shell with debugging capabilities like @uref{http://bashdb.sourceforge.net/, bashdb}. 
@item When @command{configure} tests produce invalid results for your system, it may be necessary to override them: @itemize @item For programs, tools or libraries variables, preprocessor, compiler, or linker flags, it is often sufficient to override them at @command{make} run time with some care (@pxref{Macros and Submakes}). Since this normally won't cause @command{configure} to be run again with these changed settings, it may fail if the changed variable would have caused different test results from @command{configure}, so this may work only for simple differences. @item Most tests which produce their result in a substituted variable allow to override the test by setting the variable on the @command{configure} command line (@pxref{Compilers and Options}, @pxref{Defining Variables}, @pxref{Particular Systems}). @item Many tests store their result in a cache variable (@pxref{Caching Results}). This lets you override them either on the @command{configure} command line as above, or through a primed cache or site file (@pxref{Cache Files}, @pxref{Site Defaults}). The name of a cache variable is documented with a test macro or may be inferred from @ref{Cache Variable Names}; the precise semantics of undocumented variables are often internal details, subject to change. @end itemize @item Alternatively, @command{configure} may produce invalid results because of uncaught programming errors, in your package or in an upstream library package. For example, when @code{AC_CHECK_LIB} fails to find a library with a specified function, always check @file{config.log}. This will reveal the exact error that produced the failing result: the library linked by @code{AC_CHECK_LIB} probably has a fatal bug. 
@end itemize Conversely, as macro author, you can make it easier for users of your macro: @itemize @item by minimizing dependencies between tests and between test results as far as possible, @item by using @command{make} variables to factorize and allow override of settings at @command{make} run time, @item by honoring the GNU Coding Standards and not overriding flags reserved for the user except temporarily during @command{configure} tests, @item by not requiring users of your macro to use the cache variables. Instead, expose the result of the test via @var{run-if-true} and @var{run-if-false} parameters. If the result is not a boolean, then provide it through documented shell variables. @end itemize @c ===================================================== History of Autoconf. @node History @chapter History of Autoconf @cindex History of autoconf @emph{This chapter was written by the original author, David MacKenzie.} You may be wondering, Why was Autoconf originally written? How did it get into its present form? (Why does it look like gorilla spit?) If you're not wondering, then this chapter contains no information useful to you, and you might as well skip it. If you @emph{are} wondering, then let there be light@enddots{} @menu * Genesis:: Prehistory and naming of @command{configure} * Exodus:: The plagues of M4 and Perl * Leviticus:: The priestly code of portability arrives * Numbers:: Growth and contributors * Deuteronomy:: Approaching the promises of easy configuration @end menu @node Genesis @section Genesis In June 1991 I was maintaining many of the GNU utilities for the Free Software Foundation. As they were ported to more platforms and more programs were added, the number of @option{-D} options that users had to select in the makefile (around 20) became burdensome. Especially for me---I had to test each new release on a bunch of different systems. 
So I wrote a little shell script to guess some of the correct settings for the fileutils package, and released it as part of fileutils 2.0. That @command{configure} script worked well enough that the next month I adapted it (by hand) to create similar @command{configure} scripts for several other GNU utilities packages. Brian Berliner also adapted one of my scripts for his CVS revision control system. Later that summer, I learned that Richard Stallman and Richard Pixley were developing similar scripts to use in the GNU compiler tools; so I adapted my @command{configure} scripts to support their evolving interface: using the file name @file{Makefile.in} as the templates; adding @samp{+srcdir}, the first option (of many); and creating @file{config.status} files. @node Exodus @section Exodus As I got feedback from users, I incorporated many improvements, using Emacs to search and replace, cut and paste, similar changes in each of the scripts. As I adapted more GNU utilities packages to use @command{configure} scripts, updating them all by hand became impractical. Rich Murphey, the maintainer of the GNU graphics utilities, sent me mail saying that the @command{configure} scripts were great, and asking if I had a tool for generating them that I could send him. No, I thought, but I should! So I started to work out how to generate them. And the journey from the slavery of hand-written @command{configure} scripts to the abundance and ease of Autoconf began. Cygnus @command{configure}, which was being developed at around that time, is table driven; it is meant to deal mainly with a discrete number of system types with a small number of mainly unguessable features (such as details of the object file format). The automatic configuration system that Brian Fox had developed for Bash takes a similar approach. For general use, it seems to me a hopeless cause to try to maintain an up-to-date database of which features each variant of each operating system has. 
It's easier and more reliable to check for most features on the fly---especially on hybrid systems that people have hacked on locally or that have patches from vendors installed. I considered using an architecture similar to that of Cygnus @command{configure}, where there is a single @command{configure} script that reads pieces of @file{configure.in} when run. But I didn't want to have to distribute all of the feature tests with every package, so I settled on having a different @command{configure} made from each @file{configure.in} by a preprocessor. That approach also offered more control and flexibility. I looked briefly into using the Metaconfig package, by Larry Wall, Harlan Stenn, and Raphael Manfredi, but I decided not to for several reasons. The @command{Configure} scripts it produces are interactive, which I find quite inconvenient; I didn't like the ways it checked for some features (such as library functions); I didn't know that it was still being maintained, and the @command{Configure} scripts I had seen didn't work on many modern systems (such as System V R4 and NeXT); it wasn't flexible in what it could do in response to a feature's presence or absence; I found it confusing to learn; and it was too big and complex for my needs (I didn't realize then how much Autoconf would eventually have to grow). I considered using Perl to generate my style of @command{configure} scripts, but decided that M4 was better suited to the job of simple textual substitutions: it gets in the way less, because output is implicit. Plus, everyone already has it. (Initially I didn't rely on the GNU extensions to M4.) Also, some of my friends at the University of Maryland had recently been putting M4 front ends on several programs, including @code{tvtwm}, and I was interested in trying out a new language. 
@node Leviticus @section Leviticus Since my @command{configure} scripts determine the system's capabilities automatically, with no interactive user intervention, I decided to call the program that generates them Autoconfig. But with a version number tacked on, that name would be too long for old Unix file systems, so I shortened it to Autoconf. In the fall of 1991 I called together a group of fellow questers after the Holy Grail of portability (er, that is, alpha testers) to give me feedback as I encapsulated pieces of my handwritten scripts in M4 macros and continued to add features and improve the techniques used in the checks. Prominent among the testers were Fran@,{c}ois Pinard, who came up with the idea of making an Autoconf shell script to run M4 and check for unresolved macro calls; Richard Pixley, who suggested running the compiler instead of searching the file system to find include files and symbols, for more accurate results; Karl Berry, who got Autoconf to configure @TeX{} and added the macro index to the documentation; and Ian Lance Taylor, who added support for creating a C header file as an alternative to putting @option{-D} options in a makefile, so he could use Autoconf for his UUCP package. The alpha testers cheerfully adjusted their files again and again as the names and calling conventions of the Autoconf macros changed from release to release. They all contributed many specific checks, great ideas, and bug fixes. @node Numbers @section Numbers In July 1992, after months of alpha testing, I released Autoconf 1.0, and converted many GNU packages to use it. I was surprised by how positive the reaction to it was. More people started using it than I could keep track of, including people working on software that wasn't part of the GNU Project (such as TCL, FSP, and Kerberos V5). Autoconf continued to improve rapidly, as many people using the @command{configure} scripts reported problems they encountered. 
Autoconf turned out to be a good torture test for M4 implementations. Unix M4 started to dump core because of the length of the macros that Autoconf defined, and several bugs showed up in GNU M4 as well. Eventually, we realized that we needed to use some features that only GNU M4 has. 4.3BSD M4, in particular, has an impoverished set of builtin macros; the System V version is better, but still doesn't provide everything we need. More development occurred as people put Autoconf under more stresses (and to uses I hadn't anticipated). Karl Berry added checks for X11. david zuhn contributed C++ support. Fran@,{c}ois Pinard made it diagnose invalid arguments. Jim Blandy bravely coerced it into configuring GNU Emacs, laying the groundwork for several later improvements. Roland McGrath got it to configure the GNU C Library, wrote the @command{autoheader} script to automate the creation of C header file templates, and added a @option{--verbose} option to @command{configure}. Noah Friedman added the @option{--autoconf-dir} option and @code{AC_MACRODIR} environment variable. (He also coined the term @dfn{autoconfiscate} to mean ``adapt a software package to use Autoconf''.) Roland and Noah improved the quoting protection in @code{AC_DEFINE} and fixed many bugs, especially when I got sick of dealing with portability problems from February through June, 1993. @node Deuteronomy @section Deuteronomy A long wish list for major features had accumulated, and the effect of several years of patching by various people had left some residual cruft. In April 1994, while working for Cygnus Support, I began a major revision of Autoconf. I added most of the features of the Cygnus @command{configure} that Autoconf had lacked, largely by adapting the relevant parts of Cygnus @command{configure} with the help of david zuhn and Ken Raeburn. 
These features include support for using @file{config.sub}, @file{config.guess}, @option{--host}, and @option{--target}; making links to files; and running @command{configure} scripts in subdirectories. Adding these features enabled Ken to convert GNU @code{as}, and Rob Savoye to convert DejaGNU, to using Autoconf. I added more features in response to other people's requests. Many people had asked for @command{configure} scripts to share the results of the checks between runs, because (particularly when configuring a large source tree, like Cygnus does) they were frustratingly slow. Mike Haertel suggested adding site-specific initialization scripts. People distributing software that had to unpack on MS-DOS asked for a way to override the @file{.in} extension on the file names, which produced file names like @file{config.h.in} containing two dots. Jim Avera did an extensive examination of the problems with quoting in @code{AC_DEFINE} and @code{AC_SUBST}; his insights led to significant improvements. Richard Stallman asked that compiler output be sent to @file{config.log} instead of @file{/dev/null}, to help people debug the Emacs @command{configure} script. I made some other changes because of my dissatisfaction with the quality of the program. I made the messages showing results of the checks less ambiguous, always printing a result. I regularized the names of the macros and cleaned up coding style inconsistencies. I added some auxiliary utilities that I had developed to help convert source code packages to use Autoconf. With the help of Fran@,{c}ois Pinard, I made the macros not interrupt each other's messages. (That feature revealed some performance bottlenecks in GNU M4, which he hastily corrected!) I reorganized the documentation around problems people want to solve. And I began a test suite, because experience had shown that Autoconf has a pronounced tendency to regress when we change it. 
Again, several alpha testers gave invaluable feedback, especially Fran@,{c}ois Pinard, Jim Meyering, Karl Berry, Rob Savoye, Ken Raeburn, and Mark Eichin. Finally, version 2.0 was ready. And there was much rejoicing. (And I have free time again. I think. Yeah, right.) @c ========================================================== Appendices @node GNU Free Documentation License @appendix GNU Free Documentation License @include fdl.texi @node Indices @appendix Indices @menu * Environment Variable Index:: Index of environment variables used * Output Variable Index:: Index of variables set in output files * Preprocessor Symbol Index:: Index of C preprocessor symbols defined * Cache Variable Index:: Index of documented cache variables * Autoconf Macro Index:: Index of Autoconf macros * M4 Macro Index:: Index of M4, M4sugar, and M4sh macros * Autotest Macro Index:: Index of Autotest macros * Program & Function Index:: Index of those with portability problems * Concept Index:: General index @end menu @node Environment Variable Index @appendixsec Environment Variable Index This is an alphabetical list of the environment variables that might influence Autoconf checks. @printindex ev @node Output Variable Index @appendixsec Output Variable Index This is an alphabetical list of the variables that Autoconf can substitute into files that it creates, typically one or more makefiles. @xref{Setting Output Variables}, for more information on how this is done. @printindex ov @node Preprocessor Symbol Index @appendixsec Preprocessor Symbol Index This is an alphabetical list of the C preprocessor symbols that the Autoconf macros define. To work with Autoconf, C source code needs to use these names in @code{#if} or @code{#ifdef} directives. @printindex cv @node Cache Variable Index @appendixsec Cache Variable Index This is an alphabetical list of documented cache variables used by macros defined in Autoconf. Autoconf macros may use additional cache variables internally. 
@ifset shortindexflag To make the list easier to use, the variables are listed without their preceding @samp{ac_cv_}. @end ifset @printindex CA @node Autoconf Macro Index @appendixsec Autoconf Macro Index This is an alphabetical list of the Autoconf macros. @ifset shortindexflag To make the list easier to use, the macros are listed without their preceding @samp{AC_}. @end ifset @printindex AC @node M4 Macro Index @appendixsec M4 Macro Index This is an alphabetical list of the M4, M4sugar, and M4sh macros. @ifset shortindexflag To make the list easier to use, the macros are listed without their preceding @samp{m4_} or @samp{AS_}. The prefix is @samp{m4_} for all-lowercase macro names and @samp{AS_} for all-uppercase macro names. @end ifset @printindex MS @node Autotest Macro Index @appendixsec Autotest Macro Index This is an alphabetical list of the Autotest macros. @ifset shortindexflag To make the list easier to use, the macros are listed without their preceding @samp{AT_}. @end ifset @printindex AT @node Program & Function Index @appendixsec Program and Function Index This is an alphabetical list of the programs and functions whose portability is discussed in this document. @printindex pr @node Concept Index @appendixsec Concept Index This is an alphabetical list of the files, tools, and concepts introduced in this document. 
@printindex cp @bye @c LocalWords: texinfo setfilename autoconf texi settitle setchapternewpage @c LocalWords: setcontentsaftertitlepage finalout ARG ovar varname dvar acx @c LocalWords: makeinfo dvi defcodeindex ev ov CPP cv Autotest mv defindex fn @c LocalWords: shortindexflag iftex ifset acindex ACindex ifclear ahindex fu @c LocalWords: asindex MSindex atindex ATindex auindex hdrindex prindex FIXME @c LocalWords: msindex alloca fnindex Aaarg indices FSF's dircategory ifnames @c LocalWords: direntry autoscan autoreconf autoheader autoupdate config FDs @c LocalWords: testsuite titlepage Elliston Demaille vskip filll ifnottex hmm @c LocalWords: insertcopying Autoconf's detailmenu Automake Libtool Posix ois @c LocalWords: Systemology Checkpointing Changequote INTERCAL changequote dfn @c LocalWords: Quadrigraphs builtins Shellology acconfig Bugward LIBOBJ Imake @c LocalWords: LIBOBJS IFELSE cindex flushright Pinard Metaconfig uref Simons @c LocalWords: distclean uninstall noindent versioning Tromey dir vr @c LocalWords: SAMS samp aclocal acsite underquoted emph itemx prepend SUBST @c LocalWords: evindex automake Gettext autopoint gettext symlink libtoolize @c LocalWords: defmac INIT tarname ovindex cvindex BUGREPORT PREREQ asis PROG @c LocalWords: SRCDIR srcdir globbing afterwards cmds foos fooo foooo init cd @c LocalWords: builddir timestamp src Imakefile chmod defvar CFLAGS CPPFLAGS @c LocalWords: CXXFLAGS DEFS DHAVE defvarx FCFLAGS FFLAGS LDFLAGS bindir GCC @c LocalWords: datadir datarootdir docdir dvidir htmldir libdir ifnothtml kbd @c LocalWords: includedir infodir libexecdir localedir localstatedir mandir @c LocalWords: oldincludedir pdfdir PDF psdir PostScript sbindir sysconfdir @c LocalWords: sharedstatedir DDATADIR sed tmp pkgdatadir VPATH conf unistd @c LocalWords: undef endif builtin FUNCS ifndef STACKSEG getb GETB YMP fubar @c LocalWords: PRE dest SUBDIRS subdirs fi struct STDC stdlib stddef INTTYPES @c LocalWords: inttypes STDINT stdint AWK AIX Solaris 
NeXT env EGREP FGREP yy @c LocalWords: LEXLIB YYTEXT lfl nonportable Automake's LN RANLIB byacc INETD @c LocalWords: inetd prog PROGS progs ranlib lmp lXt lX nsl gethostbyname UX @c LocalWords: NextStep isinf isnan glibc IRIX sunmath lm lsunmath pre sizeof @c LocalWords: ld inline malloc putenv setenv FreeBSD realloc SunOS MinGW @c LocalWords: snprintf vsnprintf sprintf vsprintf sscanf gcc strerror ifdef @c LocalWords: strnlen sysconf PAGESIZE unsetenv va fallback memcpy dst FUNC @c LocalWords: PowerPC GNUC libPW pragma Olibcalls CHOWN chown CLOSEDIR VFORK @c LocalWords: closedir FNMATCH fnmatch vfork FSEEKO LARGEFILE fseeko SVR sc @c LocalWords: largefile GETGROUPS getgroups GETLOADAVG DGUX UMAX NLIST KMEM @c LocalWords: SETGID getloadavg nlist GETMNTENT irix acxindex autom @c LocalWords: getmntent UnixWare GETPGRP getpgid getpgrp Posix's pid LSTAT @c LocalWords: lstat rpl MEMCMP memcmp OpenStep MBRTOWC mbrtowc MKTIME mktime @c LocalWords: localtime MMAP mmap OBSTACK obstack obstacks ARGTYPES timeval @c LocalWords: SETPGRP setpgrp defmacx Hurd SETVBUF setvbuf STRCOLL strcoll @c LocalWords: STRTOD strtod DECL STRFTIME strftime SCO UTIME utime VPRINTF @c LocalWords: DOPRNT vprintf doprnt sp unfixable LIBSOURCE LIBSOURCES Eggert @c LocalWords: linux netinet ia Tru XFree DIRENT NDIR dirent ndir multitable @c LocalWords: NAMLEN strlen namlen MKDEV SYSMACROS makedev RESOLV resolv DNS @c LocalWords: inet structs NAMESER arpa NETDB netdb UTekV UTS GCC's kB @c LocalWords: STDBOOL BOOL stdbool cplusplus bool Bool stdarg tm te @c LocalWords: ctype strchr strrchr rindex bcopy memmove memchr WEXITSTATUS @c LocalWords: WIFEXITED TIOCGWINSZ GWINSZ termios preprocess preprocessable @c LocalWords: DECLS strdup calloc BLKSIZE blksize RDEV rdev TZNAME tzname pw @c LocalWords: passwd gecos pwd MBSTATE mbstate wchar RETSIGTYPE hup UID uid @c LocalWords: gid ptrdiff uintmax EXEEXT OBJEXT Ae conftest AXP str @c LocalWords: ALIGNOF WERROR Werror cpp HP's WorkShop egcs un fied stdc CXX @c 
LocalWords: varargs BIGENDIAN Endianness SPARC endianness grep'ed CONST FC @c LocalWords: const STRINGIZE stringizing PARAMS unprotoize protos KCC cxx @c LocalWords: xlC aCC CXXCPP FREEFORM xlf FLIBS FCLIBS ish SRCEXT XTRA LFS @c LocalWords: ISC lcposix MINIX Minix conditionalized inlines hw dD confdefs @c LocalWords: fputs stdout PREPROC ar UFS HFS QNX realtime fstype STATVFS se @c LocalWords: statvfs STATFS statfs func machfile hdr lelf raboof DEFUN GTK @c LocalWords: GTKMM Grmph ified ine defn baz EOF qar Ahhh changecom algol io @c LocalWords: changeword quadrigraphs quadrigraph dnl SGI atoi overquoting @c LocalWords: Aas Wcross sep args namespace undefine bpatsubst popdef dquote @c LocalWords: bregexp Overquote overquotation meisch maisch meische maische @c LocalWords: miscian DIRNAME dirname MKDIR CATFILE XMKMF TRAVOLTA celsius @c LocalWords: EMX emxos Emacsen Korn DYNIX subshell posix Ksh ksh Pdksh Zsh @c LocalWords: pdksh zsh Allbery Lipe Kubota UWS zorglub stderr eval esac lfn @c LocalWords: drivespec Posixy DJGPP doschk prettybird LPT pfew Zsh's yu yaa @c LocalWords: yM uM aM firebird IP subdir misparses ok Unpatched abc bc zA @c LocalWords: CDPATH DUALCASE LINENO prepass Subshells lineno NULLCMD cmp wc @c LocalWords: MAILPATH scanset arg NetBSD Almquist printf expr cp pR @c LocalWords: Oliva awk Aaaaarg cmd regex xfoo GNV OpenVMS VM url fc @c LocalWords: sparc Proulx nbar nfoo maxdepth acdilrtu TWG mc ing FP @c LocalWords: mkdir exe uname OpenBSD Fileutils mktemp umask TMPDIR guid os @c LocalWords: fooXXXXXX Unicos utimes hpux hppa unescaped SUBST'ed @c LocalWords: pmake DOS's gmake ifoo DESTDIR autoconfiscated pc coff mips gg @c LocalWords: dec ultrix cpu wildcards rpcc rdtsc powerpc readline @c LocalWords: withval vxworks gless localcache usr LOFF loff CYGWIN Cygwin @c LocalWords: cygwin SIGLIST siglist SYSNDIR SYSDIR ptx lseq rusage elif MSC @c LocalWords: lfoo POUNDBANG lsun NIS getpwnam SYSCALLS RSH INTL lintl aix @c LocalWords: intl lx ldir syslog 
bsd EPI toolchain netbsd objext de KNR nn @c LocalWords: fication LTLIBOBJS Wdiff TESTDIR atconfig atlocal akim XFAIL @c LocalWords: ChangeLog prepended errexit smallexample TESTSUITEFLAGS GPL er @c LocalWords: installcheck autotest indir Pixley Bothner Eichin Kerberos adl @c LocalWords: DISTCLEANFILES preprocessor's fileutils Stallman Murphey Stenn @c LocalWords: Manfredi Autoconfig TCL FSP david zuhn Blandy MACRODIR Raeburn @c LocalWords: autoconfiscate Savoye Haertel Avera Meyering fdl appendixsec @c LocalWords: printindex american LIBOBJDIR LibdirTest ERLCFLAGS OBJCFLAGS @c LocalWords: VER Gnulib online xyes strcpy TYPEOF typeof OBJC objcc objc ln @c LocalWords: GOBJC OTP ERLC erl valloc decr dumpdef errprint incr @c LocalWords: esyscmd len maketemp pushdef substr syscmd sysval translit txt @c LocalWords: sinclude foreach myvar tolower toupper uniq BASENAME STDIN @c LocalWords: Dynix basename aname cname macroexpands xno xcheck iso @c LocalWords: LIBREADLINE lreadline lncurses libreadline vrindex SYS @c LocalWords: syncodeindex define'd caindex CAindex MacKenzie DIRS @c LocalWords: Runtime runtime Submakes submakes MAKEFLAGS whitespace @c LocalWords: Timestamps Unportability Canonicalizing stdalign dirN @c LocalWords: acinclude AMFLAGS LIBS OBJCXXFLAGS GOFLAGS runstatedir @c LocalWords: metacharacter EXPENSIVEP errno setjmp wctype sys mawk @c LocalWords: nawk ggrep egrep gegrep fgrep gfgrep LEX lex yytext nm @c LocalWords: yywrap xflex lexyy YFLAGS yacc divnum libs fuindex ffs @c LocalWords: environ sigaction extern ftello nonnull STRTOLD LLONG @c LocalWords: strtold vfprintf ULLONG strcasecmp strncasecmp MSVC th @c LocalWords: NDEBUG Xenix INO libc ISDIR ISREG Tektronix Amdahl ino @c LocalWords: typedef pxref fileblocks submembers INTMAX intmax UINT @c LocalWords: INTPTR intptr SSIZE ssize uint UINTPTR uintptr OPENMP @c LocalWords: openmp OpenMP omp Alignas Alignof Noreturn UTF vals gl @c LocalWords: offsetof VARARRAYS VLA CCC stdcxx nullptr @c LocalWords: 
constexpr decltype unicode fstreams iostreams iomanip @c LocalWords: stringstreams GXX OBJCPP OBJCXX objcxx GOBJCXX erlc tx @c LocalWords: OBJCXXCPP FIXEDFORM GFC argc argv shellvar fpp MODEXT @c LocalWords: freeform fixedform MODINC MODOUT gccgo GOC xmkmf fseek @c LocalWords: interpval ftell Interix macOS PTHREAD NonStop XOPEN xc @c LocalWords: IEC ATTRIBS BFP DFP O'Donell Sebor ERTS Erlang's erts @c LocalWords: erlang Wundef scalable USG NOTMAKE DOUCH @c LocalWords: IVE changesyntax ifnotinfo oline num cfg debugfile cdr @c LocalWords: debugmode traceoff traceon patsubst dumpdefs ifelse aa @c LocalWords: mkstemp undivert lifo errprintn BINSH sanitization bcd @c LocalWords: cleardivert bmatch bpatsubsts subst cond nblank ifval @c LocalWords: ifblank ifnblank ifvaln fputc fgetc argn mapall dvarv @c LocalWords: shiftn abcd elt noquote mkargs joinall SHA prereq dup @c LocalWords: listc setb seta ARITH HNUM xcurly xoccupied @c LocalWords: TESTA TESTB TESTC hoc xpg xxyzzyz dtksh nosuch fifos @c LocalWords: fifo Stardent sig WIF WIFSIGNALED SIGQUIT tty perl ret @c LocalWords: SIGINT NUL SFN PRN aeiou MSYS SIGTERM xhi arith UWIN @c LocalWords: CLICOLOR FPATH POSIXLY Shellshock CVE OSF doit ec ci @c LocalWords: notreached cim nc ACL faccessat Alexandre getline sqrt @c LocalWords: CONVFMT FS OFMT CDS chgrp futimens utimensat oo esc od @c LocalWords: ownerships mape readdir mkfifo mknod testsuites XSI rf @c LocalWords: bcdox hexdump filelist rmdir flushleft busybox nl HAZy @c LocalWords: ABCDEFGHIJKLMNOPQRSTUVWXYZ Fantazy FAntAZy adc unix xb @c LocalWords: SUBMAKEFLAGS ehBc ehB hBc hvB dmake hostname nlinit xf @c LocalWords: DCOMMENT bart pathnames ifhtml randx @c LocalWords: sumc hic ic fwrapv ftrapv SIGFPE memset fmudflap ctime @c LocalWords: asctime lvalues lvalue Multithreaded decstation gdb na @c LocalWords: enableval lesskey FHS superset waitpid libfoo cposix @c LocalWords: mem RESTARTABLE bzero DejaGNU EUNIT subfile optarg ive @c LocalWords: nolog expout experr 
erlflags EUnit testme eunit myprog @c LocalWords: configmake vx bashdb tvtwm questers UUCP McGrath @c LocalWords: ispell @c Local Variables: @c fill-column: 72 @c ispell-local-dictionary: "american" @c indent-tabs-mode: nil @c whitespace-check-buffer-indent: nil @c End: ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������autoconf-2.71/doc/standards.texi��������������������������������������������������������������������0000644�0000000�0000000�00000474701�13765663120�013621� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������\input texinfo @c -*-texinfo-*- @c %**start of header @setfilename standards.info @settitle GNU Coding Standards @c This date is automagically updated when you save this file: @set lastupdate June 12, 2020 @c %**end of header @dircategory GNU organization @direntry * Standards: (standards). GNU coding standards. @end direntry @c @setchapternewpage odd @setchapternewpage off @c Put everything in one index (arbitrarily chosen to be the concept index). @syncodeindex fn cp @syncodeindex ky cp @syncodeindex pg cp @syncodeindex vr cp @c This is used by a cross ref in make-stds.texi @set CODESTD 1 @copying The GNU coding standards, last updated @value{lastupdate}. 
Copyright @copyright{} 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License''. @end copying @titlepage @title GNU Coding Standards @author Richard Stallman, et al. @author last updated @value{lastupdate} @page @vskip 0pt plus 1filll @insertcopying @end titlepage @contents @ifnottex @node Top @top GNU Coding Standards @insertcopying @end ifnottex @menu * Preface:: About the GNU Coding Standards. * Legal Issues:: Keeping free software free. * Design Advice:: General program design. * Program Behavior:: Program behavior for all programs * Writing C:: Making the best use of C. * Documentation:: Documenting programs. * Managing Releases:: The release process. * References:: Mentioning non-free software or documentation. * GNU Free Documentation License:: Copying and sharing this manual. * Index:: @end menu @node Preface @chapter About the GNU Coding Standards The GNU Coding Standards were written by Richard Stallman and other GNU Project volunteers. Their purpose is to make the GNU system clean, consistent, and easy to install. This document can also be read as a guide to writing portable, robust and reliable programs. It focuses on programs written in C, but many of the rules and principles are useful even if you write in another programming language. The rules often state reasons for writing in a certain way. 
@cindex where to obtain @code{standards.texi} @cindex downloading this manual If you did not obtain this file directly from the GNU project and recently, please check for a newer version. You can get the GNU Coding Standards from the GNU web server in many different formats, including the Texinfo source, PDF, HTML, DVI, plain text, and more, at: @uref{https://www.gnu.org/prep/standards/}. If you are maintaining an official GNU package, in addition to this document, please read and follow the GNU maintainer information (@pxref{Top, , Contents, maintain, Information for Maintainers of GNU Software}). @cindex @code{gnustandards-commit@@gnu.org} mailing list If you want to receive diffs for every change to these GNU documents, join the mailing list @code{gnustandards-commit@@gnu.org}, via the web interface at @url{https://lists.gnu.org/mailman/listinfo/gnustandards-commit}. Archives are also available there. @cindex @code{bug-standards@@gnu.org} email address @cindex Savannah repository for gnustandards @cindex gnustandards project repository Please send corrections or suggestions for this document to @email{bug-standards@@gnu.org}. If you make a suggestion, please include a suggested new wording for it, to help us consider the suggestion efficiently. We prefer a context diff to the Texinfo source, but if that's difficult for you, you can make a context diff for some other version of this document, or propose it in any way that makes it clear. The source repository for this document can be found at @url{https://savannah.gnu.org/projects/gnustandards}. These standards cover the minimum of what is important when writing a GNU package. Likely, the need for additional standards will come up. Sometimes, you might suggest that such standards be added to this document. If you think your standards would be generally useful, please do suggest them. You should also set standards for your package on many questions not addressed or not firmly specified here. 
The most important point is to be self-consistent---try to stick to the conventions you pick, and try to document them as much as possible. That way, your program will be more maintainable by others. The GNU Hello program serves as an example of how to follow the GNU coding standards for a trivial program. @uref{https://www.gnu.org/software/hello/hello.html}. This release of the GNU Coding Standards was last updated @value{lastupdate}. @node Legal Issues @chapter Keeping Free Software Free @cindex legal aspects This chapter discusses how you can make sure that GNU software avoids legal difficulties, and other related issues. @menu * Reading Non-Free Code:: Referring to proprietary programs. * Contributions:: Accepting contributions. * Trademarks:: How we deal with trademark issues. @end menu @node Reading Non-Free Code @section Referring to Proprietary Programs @cindex proprietary programs @cindex avoiding proprietary code Don't in any circumstances refer to Unix source code for or during your work on GNU! (Or to any other proprietary programs.) If you have a vague recollection of the internals of a Unix program, this does not absolutely mean you can't write an imitation of it, but do try to organize the imitation internally along different lines, because this is likely to make the details of the Unix version irrelevant and dissimilar to your results. For example, Unix utilities were generally optimized to minimize memory use; if you go for speed instead, your program will be very different. You could keep the entire input file in memory and scan it there instead of using stdio. Use a smarter algorithm discovered more recently than the Unix program. Eliminate use of temporary files. Do it in one pass instead of two (we did this in the assembler). Or, on the contrary, emphasize simplicity instead of speed. For some applications, the speed of today's computers makes simpler algorithms adequate. Or go for generality. 
For example, Unix programs often have static tables or fixed-size strings, which make for arbitrary limits; use dynamic allocation instead. Make sure your program handles NULs and other funny characters in the input files. Add a programming language for extensibility and write part of the program in that language. Or turn some parts of the program into independently usable libraries. Or use a simple garbage collector instead of tracking precisely when to free memory, or use a new GNU facility such as obstacks. @node Contributions @section Accepting Contributions @cindex legal papers @cindex accepting contributions If the program you are working on is copyrighted by the Free Software Foundation, then when someone else sends you a piece of code to add to the program, we need legal papers to use it---just as we asked you to sign papers initially. @emph{Each} person who makes a nontrivial contribution to a program must sign some sort of legal papers in order for us to have clear title to the program; the main author alone is not enough. So, before adding in any contributions from other people, please tell us, so we can arrange to get the papers. Then wait until we tell you that we have received the signed papers, before you actually use the contribution. This applies both before you release the program and afterward. If you receive diffs to fix a bug, and they make significant changes, we need legal papers for that change. This also applies to comments and documentation files. For copyright law, comments and code are just text. Copyright applies to all kinds of text, so we need legal papers for all kinds. We know it is frustrating to ask for legal papers; it's frustrating for us as well. But if you don't wait, you are going out on a limb---for example, what if the contributor's employer won't sign a disclaimer? You might have to take that code out again! 
You don't need papers for changes of a few lines here or there, since they are not significant for copyright purposes. Also, you don't need papers if all you get from the suggestion is some ideas, not actual code which you use. For example, if someone sent you one implementation, but you write a different implementation of the same idea, you don't need to get papers. The very worst thing is if you forget to tell us about the other contributor. We could be very embarrassed in court some day as a result. We have more detailed advice for maintainers of GNU packages. If you have reached the stage of maintaining a GNU program (whether released or not), please take a look: @pxref{Legal Matters,,, maintain, Information for GNU Maintainers}. @node Trademarks @section Trademarks @cindex trademarks Please do not include any trademark acknowledgments in GNU software packages or documentation. Trademark acknowledgments are the statements that such-and-such is a trademark of so-and-so. The GNU Project has no objection to the basic idea of trademarks, but these acknowledgments feel like kowtowing, and there is no legal requirement for them, so we don't use them. What is legally required, as regards other people's trademarks, is to avoid using them in ways which a reader might reasonably understand as naming or labeling our own programs or activities. For example, since ``Objective C'' is (or at least was) a trademark, we made sure to say that we provide a ``compiler for the Objective C language'' rather than an ``Objective C compiler''. The latter would have been meant as a shorter way of saying the former, but it does not explicitly state the relationship, so it could be misinterpreted as using ``Objective C'' as a label for the compiler rather than for the language. Please don't use ``win'' as an abbreviation for Microsoft Windows in GNU software or documentation. In hacker terminology, calling something a ``win'' is a form of praise. 
You're free to praise Microsoft Windows on your own if you want, but please don't do so in GNU packages. Please write ``Windows'' in full, or abbreviate it to ``w.'' @xref{System Portability}. @node Design Advice @chapter General Program Design @cindex program design This chapter discusses some of the issues you should take into account when designing your program. @c Standard or ANSI C @c @c In 1989 the American National Standards Institute (ANSI) standardized @c C as standard X3.159-1989. In December of that year the @c International Standards Organization ISO adopted the ANSI C standard @c making minor changes. In 1990 ANSI then re-adopted ISO standard @c C. This version of C is known as either ANSI C or Standard C. @c A major revision of the C Standard appeared in 1999. @menu * Source Language:: Which languages to use. * Compatibility:: Compatibility with other implementations. * Using Extensions:: Using non-standard features. * Standard C:: Using standard C features. * Conditional Compilation:: Compiling code only if a conditional is true. @end menu @node Source Language @section Which Languages to Use @cindex programming languages When you want to use a language that gets compiled and runs at high speed, the best language to use is C@. C++ is ok too, but please don't make heavy use of templates. So is Java, if you compile it. When highest efficiency is not required, other languages commonly used in the free software community, such as Lisp, Scheme, Python, Ruby, and Java, are OK too. Scheme, as implemented by GNU@tie{}Guile, plays a particular role in the GNU System: it is the preferred language to extend programs written in C/C++, and also a fine language for a wide range of applications. The more GNU components use Guile and Scheme, the more users are able to extend and combine them (@pxref{The Emacs Thesis,,, guile, GNU Guile Reference Manual}). 
Many programs are designed to be extensible: they include an interpreter for a language that is higher level than C@. Often much of the program is written in that language, too. The Emacs editor pioneered this technique. @cindex Guile @cindex GNOME and Guile The standard extensibility interpreter for GNU software is Guile (@uref{https://www.gnu.org/@/software/@/guile/}), which implements the language Scheme (an especially clean and simple dialect of Lisp). Guile also includes bindings for GTK+/GNOME, making it practical to write modern GUI functionality within Guile. We don't reject programs written in other ``scripting languages'' such as Perl and Python, but using Guile is the path that will lead to overall consistency of the GNU system. @node Compatibility @section Compatibility with Other Implementations @cindex compatibility with C and POSIX standards @cindex C compatibility @cindex POSIX compatibility With occasional exceptions, utility programs and libraries for GNU should be upward compatible with those in Berkeley Unix, and upward compatible with Standard C if Standard C specifies their behavior, and upward compatible with POSIX if POSIX specifies their behavior. When these standards conflict, it is useful to offer compatibility modes for each of them. @cindex options for compatibility Standard C and POSIX prohibit many kinds of extensions. Feel free to make the extensions anyway, and include a @samp{--ansi}, @samp{--posix}, or @samp{--compatible} option to turn them off. However, if the extension has a significant chance of breaking any real programs or scripts, then it is not really upward compatible. So you should try to redesign its interface to make it upward compatible. @cindex @code{POSIXLY_CORRECT}, environment variable Many GNU programs suppress extensions that conflict with POSIX if the environment variable @code{POSIXLY_CORRECT} is defined (even if it is defined with a null value). Please make your program recognize this variable if appropriate. 
When a feature is used only by users (not by programs or command files), and it is done poorly in Unix, feel free to replace it completely with something totally different and better. (For example, @code{vi} is replaced with Emacs.) But it is nice to offer a compatible feature as well. (There is a free @code{vi} clone, so we offer it.) Additional useful features are welcome regardless of whether there is any precedent for them. @node Using Extensions @section Using Non-standard Features @cindex non-standard extensions Many GNU facilities that already exist support a number of convenient extensions over the comparable Unix facilities. Whether to use these extensions in implementing your program is a difficult question. On the one hand, using the extensions can make a cleaner program. On the other hand, people will not be able to build the program unless the other GNU tools are available. This might cause the program to work on fewer kinds of machines. With some extensions, it might be easy to provide both alternatives. For example, you can define functions with a ``keyword'' @code{INLINE} and define that as a macro to expand into either @code{inline} or nothing, depending on the compiler. In general, perhaps it is best not to use the extensions if you can straightforwardly do without them, but to use the extensions if they are a big improvement. An exception to this rule are the large, established programs (such as Emacs) which run on a great variety of systems. Using GNU extensions in such programs would make many users unhappy, so we don't do that. Another exception is for programs that are used as part of compilation: anything that must be compiled with other compilers in order to bootstrap the GNU compilation facilities. If these require the GNU compiler, then no one can compile them without having them installed already. That would be extremely troublesome in certain cases. 
@node Standard C @section Standard C and Pre-Standard C @cindex ANSI C standard 1989 Standard C is widespread enough now that it is ok to use its features in programs. There is one exception: do not ever use the ``trigraph'' feature of Standard C. The 1999 and 2011 editions of Standard C are not fully supported on all platforms. If you aim to support compilation by compilers other than GCC, you should not require these C features in your programs. It is ok to use these features conditionally when the compiler supports them. If your program is only meant to compile with GCC, then you can use these features if GCC supports them, when they give substantial benefit. However, it is easy to support pre-standard compilers in most programs, so if you know how to do that, feel free. @cindex function prototypes To support pre-standard C, instead of writing function definitions in standard prototype form, @example int foo (int x, int y) @dots{} @end example @noindent write the definition in pre-standard style like this, @example int foo (x, y) int x, y; @dots{} @end example @noindent and use a separate declaration to specify the argument prototype: @example int foo (int, int); @end example You need such a declaration anyway, in a header file, to get the benefit of prototypes in all the files where the function is called. And once you have the declaration, you normally lose nothing by writing the function definition in the pre-standard style. This technique does not work for integer types narrower than @code{int}. If you think of an argument as being of a type narrower than @code{int}, declare it as @code{int} instead. There are a few special cases where this technique is hard to use. For example, if a function argument needs to hold the system type @code{dev_t}, you run into trouble, because @code{dev_t} is shorter than @code{int} on some machines; but you cannot use @code{int} instead, because @code{dev_t} is wider than @code{int} on some machines. 
There is no type you can safely use on all machines in a non-standard definition. The only way to support non-standard C and pass such an argument is to check the width of @code{dev_t} using Autoconf and choose the argument type accordingly. This may not be worth the trouble. In order to support pre-standard compilers that do not recognize prototypes, you may want to use a preprocessor macro like this: @example /* Declare the prototype for a general external function. */ #if defined (__STDC__) || defined (WINDOWSNT) #define P_(proto) proto #else #define P_(proto) () #endif @end example @node Conditional Compilation @section Conditional Compilation When supporting configuration options already known when building your program we prefer using @code{if (... )} over conditional compilation, as in the former case the compiler is able to perform more extensive checking of all possible code paths. For example, please write @smallexample if (HAS_FOO) ... else ... @end smallexample @noindent instead of: @smallexample #ifdef HAS_FOO ... #else ... #endif @end smallexample A modern compiler such as GCC will generate exactly the same code in both cases, and we have been using similar techniques with good success in several projects. Of course, the former method assumes that @code{HAS_FOO} is defined as either 0 or 1. While this is not a silver bullet solving all portability problems, and is not always appropriate, following this policy would have saved GCC developers many hours, or even days, per year. In the case of function-like macros like @code{REVERSIBLE_CC_MODE} in GCC which cannot be simply used in @code{if (...)} statements, there is an easy workaround. 
Simply introduce another macro @code{HAS_REVERSIBLE_CC_MODE} as in the following example: @smallexample #ifdef REVERSIBLE_CC_MODE #define HAS_REVERSIBLE_CC_MODE 1 #else #define HAS_REVERSIBLE_CC_MODE 0 #endif @end smallexample @node Program Behavior @chapter Program Behavior for All Programs This chapter describes conventions for writing robust software. It also describes general standards for error messages, the command line interface, and how libraries should behave. @menu * Non-GNU Standards:: We consider standards such as POSIX; we don't "obey" them. * Semantics:: Writing robust programs. * Libraries:: Library behavior. * Errors:: Formatting error messages. * User Interfaces:: Standards about interfaces generally. * Finding Program Files:: How to find the program's executable and other files that go with it. * Graphical Interfaces:: Standards for graphical interfaces. * Command-Line Interfaces:: Standards for command line interfaces. * Dynamic Plug-In Interfaces:: Standards for dynamic plug-in interfaces. * Option Table:: Table of long options. * OID Allocations:: Table of OID slots for GNU. * Memory Usage:: When and how to care about memory needs. * File Usage:: Which files to use, and where. @end menu @node Non-GNU Standards @section Non-GNU Standards The GNU Project regards standards published by other organizations as suggestions, not orders. We consider those standards, but we do not ``obey'' them. In developing a GNU program, you should implement an outside standard's specifications when that makes the GNU system better overall in an objective sense. When it doesn't, you shouldn't. In most cases, following published standards is convenient for users---it means that their programs or scripts will work more portably. For instance, GCC implements nearly all the features of Standard C as specified by that standard. C program developers would be unhappy if it did not. 
And GNU utilities mostly follow specifications of POSIX.2; shell script writers and users would be unhappy if our programs were incompatible. But we do not follow either of these specifications rigidly, and there are specific points on which we decided not to follow them, so as to make the GNU system better for users. For instance, Standard C says that nearly all extensions to C are prohibited. How silly! GCC implements many extensions, some of which were later adopted as part of the standard. If you want these constructs to give an error message as ``required'' by the standard, you must specify @samp{--pedantic}, which was implemented only so that we can say ``GCC is a 100% implementation of the standard'', not because there is any reason to actually use it. POSIX.2 specifies that @samp{df} and @samp{du} must output sizes by default in units of 512 bytes. What users want is units of 1k, so that is what we do by default. If you want the ridiculous behavior ``required'' by POSIX, you must set the environment variable @samp{POSIXLY_CORRECT} (which was originally going to be named @samp{POSIX_ME_HARDER}). GNU utilities also depart from the letter of the POSIX.2 specification when they support long-named command-line options, and intermixing options with ordinary arguments. This minor incompatibility with POSIX is never a problem in practice, and it is very useful. In particular, don't reject a new feature, or remove an old one, merely because a standard says it is ``forbidden'' or ``deprecated''. @node Semantics @section Writing Robust Programs @cindex arbitrary limits on data Avoid arbitrary limits on the length or number of @emph{any} data structure, including file names, lines, files, and symbols, by allocating all data structures dynamically. In most Unix utilities, ``long lines are silently truncated''. This is not acceptable in a GNU utility. 
@cindex @code{NUL} characters @findex libiconv Utilities reading files should not drop NUL characters, or any other nonprinting characters. Programs should work properly with multibyte character encodings, such as UTF-8. You can use libiconv to deal with a range of encodings. @cindex error messages Check every system call for an error return, unless you know you wish to ignore errors. Include the system error text (from @code{strerror}, or equivalent) in @emph{every} error message resulting from a failing system call, as well as the name of the file if any and the name of the utility. Just ``cannot open foo.c'' or ``stat failed'' is not sufficient. @cindex @code{malloc} return value @cindex memory allocation failure Check every call to @code{malloc} or @code{realloc} to see if it returned @code{NULL}. Check @code{realloc} even if you are making the block smaller; in a system that rounds block sizes to a power of 2, @code{realloc} may get a different block if you ask for less space. You must expect @code{free} to alter the contents of the block that was freed. Anything you want to fetch from the block, you must fetch before calling @code{free}. If @code{malloc} fails in a noninteractive program, make that a fatal error. In an interactive program (one that reads commands from the user), it is better to abort the command and return to the command reader loop. This allows the user to kill other processes to free up virtual memory, and then try the command again. @cindex command-line arguments, decoding Use @code{getopt_long} to decode arguments, unless the argument syntax makes this unreasonable. When static storage is to be written in during program execution, use explicit C code to initialize it. This way, restarting the program (without reloading it), or part of it, will reinitialize those variables. Reserve C initialized declarations for data that will not be changed. @c ADR: why? 
Try to avoid low-level interfaces to obscure Unix data structures (such as file directories, utmp, or the layout of kernel memory), since these are less likely to work compatibly. If you need to find all the files in a directory, use @code{readdir} or some other high-level interface. These are supported compatibly by GNU. @cindex signal handling The preferred signal handling facilities are the BSD variant of @code{signal}, and the POSIX @code{sigaction} function; the alternative USG @code{signal} interface is an inferior design. Nowadays, using the POSIX signal functions may be the easiest way to make a program portable. If you use @code{signal}, then on GNU/Linux systems running GNU libc version 1, you should include @file{bsd/signal.h} instead of @file{signal.h}, so as to get BSD behavior. It is up to you whether to support systems where @code{signal} has only the USG behavior, or give up on them. @cindex impossible conditions In error checks that detect ``impossible'' conditions, just abort. There is usually no point in printing any message. These checks indicate the existence of bugs. Whoever wants to fix the bugs will have to read the source code and run a debugger. So explain the problem with comments in the source. The relevant data will be in variables, which are easy to examine with the debugger, so there is no point moving them elsewhere. Do not use a count of errors as the exit status for a program. @emph{That does not work}, because exit status values are limited to 8 bits (0 through 255). A single run of the program might have 256 errors; if you try to return 256 as the exit status, the parent process will see 0 as the status, and it will appear that the program succeeded. @cindex temporary files @cindex @code{TMPDIR} environment variable If you make temporary files, check the @code{TMPDIR} environment variable; if that variable is defined, use the specified directory instead of @file{/tmp}. 
In addition, be aware that there is a possible security problem when creating temporary files in world-writable directories. In C, you can avoid this problem by creating temporary files in this manner: @example fd = open (filename, O_WRONLY | O_CREAT | O_EXCL, 0600); @end example @noindent or by using the @code{mkstemps} function from Gnulib (@pxref{mkstemps,,, gnulib, Gnulib}). In bash, use @code{set -C} (long name @code{noclobber}) to avoid this problem. In addition, the @code{mktemp} utility is a more general solution for creating temporary files from shell scripts (@pxref{mktemp invocation,,, coreutils, GNU Coreutils}). @node Libraries @section Library Behavior @cindex libraries Try to make library functions reentrant. If they need to do dynamic storage allocation, at least try to avoid any nonreentrancy aside from that of @code{malloc} itself. Here are certain name conventions for libraries, to avoid name conflicts. Choose a name prefix for the library, more than two characters long. All external function and variable names should start with this prefix. In addition, there should only be one of these in any given library member. This usually means putting each one in a separate source file. An exception can be made when two external symbols are always used together, so that no reasonable program could use one without the other; then they can both go in the same file. External symbols that are not documented entry points for the user should have names beginning with @samp{_}. The @samp{_} should be followed by the chosen name prefix for the library, to prevent collisions with other libraries. These can go in the same files with user entry points if you like. Static functions and variables can be used as you like and need not fit any naming convention. 
@node Errors @section Formatting Error Messages @cindex formatting error messages @cindex error messages, formatting Error messages from compilers should look like this: @example @var{sourcefile}:@var{lineno}: @var{message} @end example @noindent If you want to mention the column number, use one of these formats: @example @var{sourcefile}:@var{lineno}:@var{column}: @var{message} @var{sourcefile}:@var{lineno}.@var{column}: @var{message} @end example @noindent Line numbers should start from 1 at the beginning of the file, and column numbers should start from 1 at the beginning of the line. (Both of these conventions are chosen for compatibility.) Calculate column numbers assuming that space and all ASCII printing characters have equal width, and assuming tab stops every 8 columns. For non-ASCII characters, Unicode character widths should be used when in a UTF-8 locale; GNU libc and GNU gnulib provide suitable @code{wcwidth} functions. The error message can also give both the starting and ending positions of the erroneous text. There are several formats so that you can avoid redundant information such as a duplicate line number. Here are the possible formats: @example @var{sourcefile}:@var{line1}.@var{column1}-@var{line2}.@var{column2}: @var{message} @var{sourcefile}:@var{line1}.@var{column1}-@var{column2}: @var{message} @var{sourcefile}:@var{line1}-@var{line2}: @var{message} @end example @noindent When an error is spread over several files, you can use this format: @example @var{file1}:@var{line1}.@var{column1}-@var{file2}:@var{line2}.@var{column2}: @var{message} @end example Error messages from other noninteractive programs should look like this: @example @var{program}:@var{sourcefile}:@var{lineno}: @var{message} @end example @noindent when there is an appropriate source file, or like this: @example @var{program}: @var{message} @end example @noindent when there is no relevant source file. 
If you want to mention the column number, use this format: @example @var{program}:@var{sourcefile}:@var{lineno}:@var{column}: @var{message} @end example In an interactive program (one that is reading commands from a terminal), it is better not to include the program name in an error message. The place to indicate which program is running is in the prompt or with the screen layout. (When the same program runs with input from a source other than a terminal, it is not interactive and would do best to print error messages using the noninteractive style.) The string @var{message} should not begin with a capital letter when it follows a program name and/or file name, because that isn't the beginning of a sentence. (The sentence conceptually starts at the beginning of the line.) Also, it should not end with a period. Error messages from interactive programs, and other messages such as usage messages, should start with a capital letter. But they should not end with a period. @node User Interfaces @section Standards for Interfaces Generally @cindex program name and its behavior @cindex behavior, dependent on program's name Please don't make the behavior of a utility depend on the name used to invoke it. It is useful sometimes to make a link to a utility with a different name, and that should not change what it does. Thus, if you make @file{foo} a link to @file{ls}, the program should behave the same regardless of which of those names is used to invoke it. Instead, use a run time option or a compilation switch or both to select among the alternate behaviors. You can also build two versions of the program, with different default behaviors, and install them under two different names. @cindex output device and program's behavior Likewise, please don't make the behavior of a command-line program depend on the type of output device it gets as standard output or standard input. 
Device independence is an important principle of the system's design; do not compromise it merely to save someone from typing an option now and then. (Variation in error message syntax when using a terminal is ok, because that is a side issue that people do not depend on.) If you think one behavior is most useful when the output is to a terminal, and another is most useful when the output is a file or a pipe, then it is usually best to make the default behavior the one that is useful with output to a terminal, and have an option for the other behavior. You can also build two different versions of the program with different names. There is an exception for programs whose output in certain cases is binary data. Sending such output to a terminal is useless and can cause trouble. If such a program normally sends its output to stdout, it should detect, in these cases, when the output is a terminal and give an error message instead. The @code{-f} option should override this exception, thus permitting the output to go to the terminal. Compatibility requires certain programs to depend on the type of output device. It would be disastrous if @code{ls} or @code{sh} did not do so in the way all users expect. In some of these cases, we supplement the program with a preferred alternate version that does not depend on the output device type. For example, we provide a @code{dir} program much like @code{ls} except that its default output format is always multi-column format. @node Finding Program Files @section Finding the Program's Executable and Associated Files A program may need to find the executable file it was started with, so as to relaunch the same program. It may need to find associated files, either source files or files constructed by building, that it uses at run time. The way to find them starts with looking at @code{argv[0]}. 
If that string contains a slash, it is by convention the file name of the executable and its directory part is the directory that contained the executable. This is the case when the program was not found through @env{PATH}, which normally means it was built but not installed, and run from the build directory. The program can use the @code{argv[0]} file name to relaunch itself, and can look in its directory part for associated files. If that file name is not absolute, then it is relative to the working directory in which the program started. If @code{argv[0]} does not contain a slash, it is a command name whose executable was found via @env{PATH}. The program should search for that name in the directories in @env{PATH}, interpreting @file{.} as the working directory that was current when the program started. If this procedure finds the executable, we call the directory it was found in the @dfn{invocation directory}. The program should check for the presence in that directory of the associated files it needs. If the program's executable is normally built in a subdirectory of the main build directory, and the main build directory contains associated files (perhaps including subdirectories), the program should look at the parent of the invocation directory, checking for the associated files and subdirectories the main build directory should contain. If the invocation directory doesn't contain what's needed, but the executable file name is a symbolic link, the program should try using the link target's containing directory as the invocation directory. If this procedure doesn't come up with an invocation directory that is valid---normally the case for an installed program that was found via @env{PATH}---the program should look for the associated files in the directories where the program's makefile installs them. @xref{Directory Variables}. Providing valid information in @code{argv[0]} is a convention, not guaranteed. 
Therefore, any program that needs to know the location of its executable, or that of other associated files, should offer the user environment variables to specify those locations explicitly.
In addition, consider providing a library interface (for use from C), and perhaps a keyboard-driven console interface (for use by users from console mode). Once you are doing the work to provide the functionality and the graphical interface, these won't be much extra work. Please make your program interoperate with access technology such as screen readers (see @url{https://www.gnu.org/accessibility/accessibility.html}). This should be automatic if you use GTK+. @node Command-Line Interfaces @section Standards for Command Line Interfaces @cindex command-line interface @findex getopt It is a good idea to follow the POSIX guidelines for the command-line options of a program. The easiest way to do this is to use @code{getopt} to parse them. Note that the GNU version of @code{getopt} will normally permit options anywhere among the arguments unless the special argument @samp{--} is used. This is not what POSIX specifies; it is a GNU extension. @cindex long-named options Please define long-named options that are equivalent to the single-letter Unix-style options. We hope to make GNU more user friendly this way. This is easy to do with the GNU function @code{getopt_long}. One of the advantages of long-named options is that they can be consistent from program to program. For example, users should be able to expect the ``verbose'' option of any GNU program which has one, to be spelled precisely @samp{--verbose}. To achieve this uniformity, look at the table of common long-option names when you choose the option names for your program (@pxref{Option Table}). It is usually a good idea for file names given as ordinary arguments to be input files only; any output files would be specified using options (preferably @samp{-o} or @samp{--output}). Even if you allow an output file name as an ordinary argument for compatibility, try to provide an option as another way to specify it. This will lead to more consistency among GNU utilities, and fewer idiosyncrasies for users to remember. 
@cindex standard command-line options @cindex options, standard command-line @cindex CGI programs, standard options for @cindex PATH_INFO, specifying standard options as All programs should support two standard options: @samp{--version} and @samp{--help}. CGI programs should accept these as command-line options, and also if given as the @env{PATH_INFO}; for instance, visiting @indicateurl{http://example.org/p.cgi/--help} in a browser should output the same information as invoking @samp{p.cgi --help} from the command line. @menu * --version:: The standard output for --version. * --help:: The standard output for --help. @end menu @node --version @subsection @option{--version} @cindex @samp{--version} output The standard @code{--version} option should direct the program to print information about its name, version, origin and legal status, all on standard output, and then exit successfully. Other options and arguments should be ignored once this is seen, and the program should not perform its normal function. @cindex canonical name of a program @cindex program's canonical name The first line is meant to be easy for a program to parse; the version number proper starts after the last space. In addition, it contains the canonical name for this program, in this format: @example GNU Emacs 19.30 @end example @noindent The program's name should be a constant string; @emph{don't} compute it from @code{argv[0]}. The idea is to state the standard or canonical name for the program, not its file name. There are other ways to find out the precise file name where a command is found in @code{PATH}. If the program is a subsidiary part of a larger package, mention the package name in parentheses, like this: @example emacsserver (GNU Emacs) 19.30 @end example @noindent If the package has a version number which is different from this program's version number, you can mention the package version number just before the close-parenthesis. 
If you @emph{need} to mention the version numbers of libraries which are distributed separately from the package which contains this program, you can do so by printing an additional line of version info for each library you want to mention. Use the same format for these lines as for the first line. Please do not mention all of the libraries that the program uses ``just for completeness''---that would produce a lot of unhelpful clutter. Please mention library version numbers only if you find in practice that they are very important to you in debugging. The following line, after the version number line or lines, should be a copyright notice. If more than one copyright notice is called for, put each on a separate line. Next should follow a line stating the license, preferably using one of abbreviations below, and a brief statement that the program is free software, and that users are free to copy and change it. Also mention that there is no warranty, to the extent permitted by law. See recommended wording below. It is ok to finish the output with a list of the major authors of the program, as a way of giving credit. Here's an example of output that follows these rules: @smallexample GNU hello 2.3 Copyright (C) 2007 Free Software Foundation, Inc. License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html> This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law. @end smallexample You should adapt this to your program, of course, filling in the proper year, copyright holder, name of program, and the references to distribution terms, and changing the rest of the wording as necessary. This copyright notice only needs to mention the most recent year in which changes were made---there's no need to list the years for previous versions' changes. You don't have to mention the name of the program in these notices, if that is inconvenient, since it appeared in the first line. 
(The rules are different for copyright notices in source files; @pxref{Copyright Notices,,,maintain,Information for GNU Maintainers}.) Translations of the above lines must preserve the validity of the copyright notices (@pxref{Internationalization}). If the translation's character set supports it, the @samp{(C)} should be replaced with the copyright symbol, as follows: @ifinfo (the official copyright symbol, which is the letter C in a circle); @end ifinfo @ifnotinfo @copyright{} @end ifnotinfo Write the word ``Copyright'' exactly like that, in English. Do not translate it into another language. International treaties recognize the English word ``Copyright''; translations into other languages do not have legal significance. Finally, here is the table of our suggested license abbreviations. Any abbreviation can be followed by @samp{v@var{version}[+]}, meaning that particular version, or later versions with the @samp{+}, as shown above. In the case of a GNU license, @emph{always} indicate the permitted versions in this way. In the case of exceptions for extra permissions with the GPL, we use @samp{/} for a separator; the version number can follow the license abbreviation as usual, as in the examples below. @table @asis @item GPL GNU General Public License, @url{https://www.gnu.org/@/licenses/@/gpl.html}. @item LGPL GNU Lesser General Public License, @url{https://www.gnu.org/@/licenses/@/lgpl.html}. @item GPL/Ada GNU GPL with the exception for Ada. @item Apache The Apache Software Foundation license, @url{https://directory.fsf.org/@/wiki/@/License:Apache2.0}. @item Artistic The Artistic license used for Perl, @url{https://directory.fsf.org/@/wiki/@/License:ArtisticLicense2.0}. @item Expat The Expat license, @url{https://directory.fsf.org/@/wiki/@/License:Expat}. @item MPL The Mozilla Public License, @url{https://directory.fsf.org/@/wiki/@/License:MPLv2.0}. 
@item OBSD The original (4-clause) BSD license, incompatible with the GNU GPL,@* @url{https://directory.fsf.org/@/wiki/@/License:BSD_4Clause}. @item PHP The license used for PHP, @url{https://directory.fsf.org/@/wiki/@/License:PHPv3.01}. @item public domain The non-license that is being in the public domain,@* @url{https://www.gnu.org/@/licenses/@/license-list.html#PublicDomain}. @item Python The license for Python, @url{https://directory.fsf.org/@/wiki/@/License:Python2.0.1}. @item RBSD The revised (3-clause) BSD, compatible with the GNU GPL,@* @url{https://directory.fsf.org/@/wiki/@/License:BSD_3Clause}. @item X11 The simple non-copyleft license used for most versions of the X Window System, @url{https://directory.fsf.org/@/wiki/@/License:X11}. @item Zlib The license for Zlib, @url{https://directory.fsf.org/@/wiki/@/License:Zlib}. @end table More information about these licenses and many more are on the GNU licensing web pages, @url{https://www.gnu.org/@/licenses/@/license-list.html}. @node --help @subsection @option{--help} @cindex @samp{--help} output The standard @code{--help} option should output brief documentation for how to invoke the program, on standard output, then exit successfully. Other options and arguments should be ignored once this is seen, and the program should not perform its normal function. @cindex address for bug reports @cindex bug reports Near the end of the @samp{--help} option's output, please place lines giving the email address for bug reports, the package's home page (normally @indicateurl{https://www.gnu.org/software/@var{pkg}}, and the general page for help using GNU programs. The format should be like this: @example Report bugs to: @var{mailing-address} @var{pkg} home page: <https://www.gnu.org/software/@var{pkg}/> General help using GNU software: <https://www.gnu.org/gethelp/> @end example It is ok to mention other appropriate mailing lists and web pages. 
@node Dynamic Plug-In Interfaces @section Standards for Dynamic Plug-in Interfaces @cindex plug-ins @cindex dynamic plug-ins Another aspect of keeping free programs free is encouraging development of free plug-ins, and discouraging development of proprietary plug-ins. Many GNU programs will not have anything like plug-ins at all, but those that do should follow these practices. First, the general plug-in architecture design should closely tie the plug-in to the original code, such that the plug-in and the base program are parts of one extended program. For GCC, for example, plug-ins receive and modify GCC's internal data structures, and so clearly form an extended program with the base GCC. @vindex plugin_is_GPL_compatible Second, you should require plug-in developers to affirm that their plug-ins are released under an appropriate license. This should be enforced with a simple programmatic check. For GCC, again for example, a plug-in must define the global symbol @code{plugin_is_GPL_compatible}, thus asserting that the plug-in is released under a GPL-compatible license (@pxref{Plugins,, Plugins, gccint, GCC Internals}). By adding this check to your program you are not creating a new legal requirement. The GPL itself requires plug-ins to be free software, licensed compatibly. As long as you have followed the first rule above to keep plug-ins closely tied to your original program, the GPL and AGPL already require those plug-ins to be released under a compatible license. The symbol definition in the plug-in---or whatever equivalent works best in your program---makes it harder for anyone who might distribute proprietary plug-ins to legally defend themselves. If a case about this got to court, we can point to that symbol as evidence that the plug-in developer understood that the license had this requirement. @node Option Table @section Table of Long Options @cindex long option names @cindex table of long options Here is a table of long options used by GNU programs. 
It is surely incomplete, but we aim to list all the options that a new program might want to be compatible with. If you use names not already in the table, please send @email{bug-standards@@gnu.org} a list of them, with their meanings, so we can update the table. @c Please leave newlines between items in this table; it's much easier @c to update when it isn't completely squashed together and unreadable. @c When there is more than one short option for a long option name, put @c a semicolon between the lists of the programs that use them, not a @c period. --friedman @table @samp @item after-date @samp{-N} in @code{tar}. @item all @samp{-a} in @code{du}, @code{ls}, @code{nm}, @code{stty}, @code{uname}, and @code{unexpand}. @item all-text @samp{-a} in @code{diff}. @item almost-all @samp{-A} in @code{ls}. @item append @samp{-a} in @code{etags}, @code{tee}, @code{time}; @samp{-r} in @code{tar}. @item archive @samp{-a} in @code{cp}. @item archive-name @samp{-n} in @code{shar}. @item arglength @samp{-l} in @code{m4}. @item ascii @samp{-a} in @code{diff}. @item assign @samp{-v} in @code{gawk}. @item assume-new @samp{-W} in @code{make}. @item assume-old @samp{-o} in @code{make}. @item auto-check @samp{-a} in @code{recode}. @item auto-pager @samp{-a} in @code{wdiff}. @item auto-reference @samp{-A} in @code{ptx}. @item avoid-wraps @samp{-n} in @code{wdiff}. @item background For server programs, run in the background. @item backward-search @samp{-B} in @code{ctags}. @item basename @samp{-f} in @code{shar}. @item batch Used in GDB. @item baud Used in GDB. @item before @samp{-b} in @code{tac}. @item binary @samp{-b} in @code{cpio} and @code{diff}. @item bits-per-code @samp{-b} in @code{shar}. @item block-size Used in @code{cpio} and @code{tar}. @item blocks @samp{-b} in @code{head} and @code{tail}. @item break-file @samp{-b} in @code{ptx}. @item brief Used in various programs to make output shorter. @item bytes @samp{-c} in @code{head}, @code{split}, and @code{tail}. 
@item c@t{++} @samp{-C} in @code{etags}. @item catenate @samp{-A} in @code{tar}. @item cd Used in various programs to specify the directory to use. @item changes @samp{-c} in @code{chgrp} and @code{chown}. @item classify @samp{-F} in @code{ls}. @item colons @samp{-c} in @code{recode}. @item command @samp{-c} in @code{su}; @samp{-x} in GDB. @item compare @samp{-d} in @code{tar}. @item compat Used in @code{gawk}. @item compress @samp{-Z} in @code{tar} and @code{shar}. @item concatenate @samp{-A} in @code{tar}. @item confirmation @samp{-w} in @code{tar}. @item context Used in @code{diff}. @item copyleft @samp{-W copyleft} in @code{gawk}. @item copyright @samp{-C} in @code{ptx}, @code{recode}, and @code{wdiff}; @samp{-W copyright} in @code{gawk}. @item core Used in GDB. @item count @samp{-q} in @code{who}. @item count-links @samp{-l} in @code{du}. @item create Used in @code{tar} and @code{cpio}. @item cut-mark @samp{-c} in @code{shar}. @item cxref @samp{-x} in @code{ctags}. @item date @samp{-d} in @code{touch}. @item debug @samp{-d} in @code{make} and @code{m4}; @samp{-t} in Bison. @item define @samp{-D} in @code{m4}. @item defines @samp{-d} in Bison and @code{ctags}. @item delete @samp{-D} in @code{tar}. @item dereference @samp{-L} in @code{chgrp}, @code{chown}, @code{cpio}, @code{du}, @code{ls}, and @code{tar}. @item dereference-args @samp{-D} in @code{du}. @item device Specify an I/O device (special file name). @item diacritics @samp{-d} in @code{recode}. @item dictionary-order @samp{-d} in @code{look}. @item diff @samp{-d} in @code{tar}. @item digits @samp{-n} in @code{csplit}. @item directory Specify the directory to use, in various programs. In @code{ls}, it means to show directories themselves rather than their contents. In @code{rm} and @code{ln}, it means to not treat links to directories specially. @item discard-all @samp{-x} in @code{strip}. @item discard-locals @samp{-X} in @code{strip}. @item dry-run @samp{-n} in @code{make}. 
@item ed @samp{-e} in @code{diff}. @item elide-empty-files @samp{-z} in @code{csplit}. @item end-delete @samp{-x} in @code{wdiff}. @item end-insert @samp{-z} in @code{wdiff}. @item entire-new-file @samp{-N} in @code{diff}. @item environment-overrides @samp{-e} in @code{make}. @item eof @samp{-e} in @code{xargs}. @item epoch Used in GDB. @item error-limit Used in @code{makeinfo}. @item error-output @samp{-o} in @code{m4}. @item escape @samp{-b} in @code{ls}. @item exclude-from @samp{-X} in @code{tar}. @item exec Used in GDB. @item exit @samp{-x} in @code{xargs}. @item exit-0 @samp{-e} in @code{unshar}. @item expand-tabs @samp{-t} in @code{diff}. @item expression @samp{-e} in @code{sed}. @item extern-only @samp{-g} in @code{nm}. @item extract @samp{-i} in @code{cpio}; @samp{-x} in @code{tar}. @item faces @samp{-f} in @code{finger}. @item fast @samp{-f} in @code{su}. @item fatal-warnings @samp{-E} in @code{m4}. @item file @samp{-f} in @code{gawk}, @code{info}, @code{make}, @code{mt}, @code{sed}, and @code{tar}. @item field-separator @samp{-F} in @code{gawk}. @item file-prefix @samp{-b} in Bison. @item file-type @samp{-F} in @code{ls}. @item files-from @samp{-T} in @code{tar}. @item fill-column Used in @code{makeinfo}. @item flag-truncation @samp{-F} in @code{ptx}. @item fixed-output-files @samp{-y} in Bison. @item follow @samp{-f} in @code{tail}. @item footnote-style Used in @code{makeinfo}. @item force @samp{-f} in @code{cp}, @code{ln}, @code{mv}, and @code{rm}. @item force-prefix @samp{-F} in @code{shar}. @item foreground For server programs, run in the foreground; in other words, don't do anything special to run the server in the background. @item format Used in @code{ls}, @code{time}, and @code{ptx}. @item freeze-state @samp{-F} in @code{m4}. @item fullname Used in GDB. @item gap-size @samp{-g} in @code{ptx}. @item get @samp{-x} in @code{tar}. @item graphic @samp{-i} in @code{ul}. @item graphics @samp{-g} in @code{recode}. @item group @samp{-g} in @code{install}. 
@item gzip @samp{-z} in @code{tar} and @code{shar}. @item hashsize @samp{-H} in @code{m4}. @item header @samp{-h} in @code{objdump} and @code{recode} @item heading @samp{-H} in @code{who}. @item help Used to ask for brief usage information. @item here-delimiter @samp{-d} in @code{shar}. @item hide-control-chars @samp{-q} in @code{ls}. @item html In @code{makeinfo}, output HTML. @item idle @samp{-u} in @code{who}. @item ifdef @samp{-D} in @code{diff}. @item ignore @samp{-I} in @code{ls}; @samp{-x} in @code{recode}. @item ignore-all-space @samp{-w} in @code{diff}. @item ignore-backups @samp{-B} in @code{ls}. @item ignore-blank-lines @samp{-B} in @code{diff}. @item ignore-case @samp{-f} in @code{look} and @code{ptx}; @samp{-i} in @code{diff} and @code{wdiff}. @item ignore-errors @samp{-i} in @code{make}. @item ignore-file @samp{-i} in @code{ptx}. @item ignore-indentation @samp{-I} in @code{etags}. @item ignore-init-file @samp{-f} in Oleo. @item ignore-interrupts @samp{-i} in @code{tee}. @item ignore-matching-lines @samp{-I} in @code{diff}. @item ignore-space-change @samp{-b} in @code{diff}. @item ignore-zeros @samp{-i} in @code{tar}. @item include @samp{-i} in @code{etags}; @samp{-I} in @code{m4}. @item include-dir @samp{-I} in @code{make}. @item incremental @samp{-G} in @code{tar}. @item info @samp{-i}, @samp{-l}, and @samp{-m} in Finger. @item init-file In some programs, specify the name of the file to read as the user's init file. @item initial @samp{-i} in @code{expand}. @item initial-tab @samp{-T} in @code{diff}. @item inode @samp{-i} in @code{ls}. @item interactive @samp{-i} in @code{cp}, @code{ln}, @code{mv}, @code{rm}; @samp{-e} in @code{m4}; @samp{-p} in @code{xargs}; @samp{-w} in @code{tar}. @item intermix-type @samp{-p} in @code{shar}. @item iso-8601 Used in @code{date} @item jobs @samp{-j} in @code{make}. @item just-print @samp{-n} in @code{make}. @item keep-going @samp{-k} in @code{make}. @item keep-files @samp{-k} in @code{csplit}. 
@item kilobytes @samp{-k} in @code{du} and @code{ls}. @item language @samp{-l} in @code{etags}. @item less-mode @samp{-l} in @code{wdiff}. @item level-for-gzip @samp{-g} in @code{shar}. @item line-bytes @samp{-C} in @code{split}. @item lines Used in @code{split}, @code{head}, and @code{tail}. @item link @samp{-l} in @code{cpio}. @item lint @itemx lint-old Used in @code{gawk}. @item list @samp{-t} in @code{cpio}; @samp{-l} in @code{recode}. @item list @samp{-t} in @code{tar}. @item literal @samp{-N} in @code{ls}. @item load-average @samp{-l} in @code{make}. @item login Used in @code{su}. @item machine Used in @code{uname}. @item macro-name @samp{-M} in @code{ptx}. @item mail @samp{-m} in @code{hello} and @code{uname}. @item make-directories @samp{-d} in @code{cpio}. @item makefile @samp{-f} in @code{make}. @item mapped Used in GDB. @item max-args @samp{-n} in @code{xargs}. @item max-chars @samp{-n} in @code{xargs}. @item max-lines @samp{-l} in @code{xargs}. @item max-load @samp{-l} in @code{make}. @item max-procs @samp{-P} in @code{xargs}. @item mesg @samp{-T} in @code{who}. @item message @samp{-T} in @code{who}. @item minimal @samp{-d} in @code{diff}. @item mixed-uuencode @samp{-M} in @code{shar}. @item mode @samp{-m} in @code{install}, @code{mkdir}, and @code{mkfifo}. @item modification-time @samp{-m} in @code{tar}. @item multi-volume @samp{-M} in @code{tar}. @item name-prefix @samp{-a} in Bison. @item nesting-limit @samp{-L} in @code{m4}. @item net-headers @samp{-a} in @code{shar}. @item new-file @samp{-W} in @code{make}. @item no-builtin-rules @samp{-r} in @code{make}. @item no-character-count @samp{-w} in @code{shar}. @item no-check-existing @samp{-x} in @code{shar}. @item no-common @samp{-3} in @code{wdiff}. @item no-create @samp{-c} in @code{touch}. @item no-defines @samp{-D} in @code{etags}. @item no-deleted @samp{-1} in @code{wdiff}. @item no-dereference @samp{-d} in @code{cp}. @item no-inserted @samp{-2} in @code{wdiff}. 
@item no-keep-going @samp{-S} in @code{make}. @item no-lines @samp{-l} in Bison. @item no-piping @samp{-P} in @code{shar}. @item no-prof @samp{-e} in @code{gprof}. @item no-regex @samp{-R} in @code{etags}. @item no-sort @samp{-p} in @code{nm}. @item no-splash Don't print a startup splash screen. @item no-split Used in @code{makeinfo}. @item no-static @samp{-a} in @code{gprof}. @item no-time @samp{-E} in @code{gprof}. @item no-timestamp @samp{-m} in @code{shar}. @item no-validate Used in @code{makeinfo}. @item no-wait Used in @code{emacsclient}. @item no-warn Used in various programs to inhibit warnings. @item node @samp{-n} in @code{info}. @item nodename @samp{-n} in @code{uname}. @item nonmatching @samp{-f} in @code{cpio}. @item nstuff @samp{-n} in @code{objdump}. @item null @samp{-0} in @code{xargs}. @item number @samp{-n} in @code{cat}. @item number-nonblank @samp{-b} in @code{cat}. @item numeric-sort @samp{-n} in @code{nm}. @item numeric-uid-gid @samp{-n} in @code{cpio} and @code{ls}. @item nx Used in GDB. @item old-archive @samp{-o} in @code{tar}. @item old-file @samp{-o} in @code{make}. @item one-file-system @samp{-l} in @code{tar}, @code{cp}, and @code{du}. @item only-file @samp{-o} in @code{ptx}. @item only-prof @samp{-f} in @code{gprof}. @item only-time @samp{-F} in @code{gprof}. @item options @samp{-o} in @code{getopt}, @code{fdlist}, @code{fdmount}, @code{fdmountd}, and @code{fdumount}. @item output In various programs, specify the output file name. @item output-prefix @samp{-o} in @code{shar}. @item override @samp{-o} in @code{rm}. @item overwrite @samp{-c} in @code{unshar}. @item owner @samp{-o} in @code{install}. @item paginate @samp{-l} in @code{diff}. @item paragraph-indent Used in @code{makeinfo}. @item parents @samp{-p} in @code{mkdir} and @code{rmdir}. @item pass-all @samp{-p} in @code{ul}. @item pass-through @samp{-p} in @code{cpio}. @item port @samp{-P} in @code{finger}. @item portability @samp{-c} in @code{cpio} and @code{tar}. 
@item posix Used in @code{gawk}. @item prefix-builtins @samp{-P} in @code{m4}. @item prefix @samp{-f} in @code{csplit}. @item preserve Used in @code{tar} and @code{cp}. @item preserve-environment @samp{-p} in @code{su}. @item preserve-modification-time @samp{-m} in @code{cpio}. @item preserve-order @samp{-s} in @code{tar}. @item preserve-permissions @samp{-p} in @code{tar}. @item print @samp{-l} in @code{diff}. @item print-chars @samp{-L} in @code{cmp}. @item print-data-base @samp{-p} in @code{make}. @item print-directory @samp{-w} in @code{make}. @item print-file-name @samp{-o} in @code{nm}. @item print-symdefs @samp{-s} in @code{nm}. @item printer @samp{-p} in @code{wdiff}. @item prompt @samp{-p} in @code{ed}. @item proxy Specify an HTTP proxy. @item query-user @samp{-X} in @code{shar}. @item question @samp{-q} in @code{make}. @item quiet Used in many programs to inhibit the usual output. Every program accepting @samp{--quiet} should accept @samp{--silent} as a synonym. @item quiet-unshar @samp{-Q} in @code{shar} @item quote-name @samp{-Q} in @code{ls}. @item rcs @samp{-n} in @code{diff}. @item re-interval Used in @code{gawk}. @item read-full-blocks @samp{-B} in @code{tar}. @item readnow Used in GDB. @item recon @samp{-n} in @code{make}. @item record-number @samp{-R} in @code{tar}. @item recursive Used in @code{chgrp}, @code{chown}, @code{cp}, @code{ls}, @code{diff}, and @code{rm}. @item reference @samp{-r} in @code{touch}. @item references @samp{-r} in @code{ptx}. @item regex @samp{-r} in @code{tac} and @code{etags}. @item release @samp{-r} in @code{uname}. @item reload-state @samp{-R} in @code{m4}. @item relocation @samp{-r} in @code{objdump}. @item rename @samp{-r} in @code{cpio}. @item replace @samp{-i} in @code{xargs}. @item report-identical-files @samp{-s} in @code{diff}. @item reset-access-time @samp{-a} in @code{cpio}. @item reverse @samp{-r} in @code{ls} and @code{nm}. @item reversed-ed @samp{-f} in @code{diff}. 
@item right-side-defs @samp{-R} in @code{ptx}. @item same-order @samp{-s} in @code{tar}. @item same-permissions @samp{-p} in @code{tar}. @item save @samp{-g} in @code{stty}. @item se Used in GDB. @item sentence-regexp @samp{-S} in @code{ptx}. @item separate-dirs @samp{-S} in @code{du}. @item separator @samp{-s} in @code{tac}. @item sequence Used by @code{recode} to chose files or pipes for sequencing passes. @item shell @samp{-s} in @code{su}. @item show-all @samp{-A} in @code{cat}. @item show-c-function @samp{-p} in @code{diff}. @item show-ends @samp{-E} in @code{cat}. @item show-function-line @samp{-F} in @code{diff}. @item show-tabs @samp{-T} in @code{cat}. @item silent Used in many programs to inhibit the usual output. Every program accepting @samp{--silent} should accept @samp{--quiet} as a synonym. @item size @samp{-s} in @code{ls}. @item socket Specify a file descriptor for a network server to use for its socket, instead of opening and binding a new socket. This provides a way to run, in a non-privileged process, a server that normally needs a reserved port number. @item sort Used in @code{ls}. @item source @samp{-W source} in @code{gawk}. @item sparse @samp{-S} in @code{tar}. @item speed-large-files @samp{-H} in @code{diff}. @item split-at @samp{-E} in @code{unshar}. @item split-size-limit @samp{-L} in @code{shar}. @item squeeze-blank @samp{-s} in @code{cat}. @item start-delete @samp{-w} in @code{wdiff}. @item start-insert @samp{-y} in @code{wdiff}. @item starting-file Used in @code{tar} and @code{diff} to specify which file within a directory to start processing with. @item statistics @samp{-s} in @code{wdiff}. @item stdin-file-list @samp{-S} in @code{shar}. @item stop @samp{-S} in @code{make}. @item strict @samp{-s} in @code{recode}. @item strip @samp{-s} in @code{install}. @item strip-all @samp{-s} in @code{strip}. @item strip-debug @samp{-S} in @code{strip}. @item submitter @samp{-s} in @code{shar}. 
@item suffix @samp{-S} in @code{cp}, @code{ln}, @code{mv}. @item suffix-format @samp{-b} in @code{csplit}. @item sum @samp{-s} in @code{gprof}. @item summarize @samp{-s} in @code{du}. @item symbolic @samp{-s} in @code{ln}. @item symbols Used in GDB and @code{objdump}. @item synclines @samp{-s} in @code{m4}. @item sysname @samp{-s} in @code{uname}. @item tabs @samp{-t} in @code{expand} and @code{unexpand}. @item tabsize @samp{-T} in @code{ls}. @item terminal @samp{-T} in @code{tput} and @code{ul}. @samp{-t} in @code{wdiff}. @item text @samp{-a} in @code{diff}. @item text-files @samp{-T} in @code{shar}. @item time Used in @code{ls} and @code{touch}. @item timeout Specify how long to wait before giving up on some operation. @item to-stdout @samp{-O} in @code{tar}. @item total @samp{-c} in @code{du}. @item touch @samp{-t} in @code{make}, @code{ranlib}, and @code{recode}. @item trace @samp{-t} in @code{m4}. @item traditional @samp{-t} in @code{hello}; @samp{-W traditional} in @code{gawk}; @samp{-G} in @code{ed}, @code{m4}, and @code{ptx}. @item tty Used in GDB. @item typedefs @samp{-t} in @code{ctags}. @item typedefs-and-c++ @samp{-T} in @code{ctags}. @item typeset-mode @samp{-t} in @code{ptx}. @item uncompress @samp{-z} in @code{tar}. @item unconditional @samp{-u} in @code{cpio}. @item undefine @samp{-U} in @code{m4}. @item undefined-only @samp{-u} in @code{nm}. @item update @samp{-u} in @code{cp}, @code{ctags}, @code{mv}, @code{tar}. @item usage Used in @code{gawk}; same as @samp{--help}. @item uuencode @samp{-B} in @code{shar}. @item vanilla-operation @samp{-V} in @code{shar}. @item verbose Print more information about progress. Many programs support this. @item verify @samp{-W} in @code{tar}. @item version Print the version number. @item version-control @samp{-V} in @code{cp}, @code{ln}, @code{mv}. @item vgrind @samp{-v} in @code{ctags}. @item volume @samp{-V} in @code{tar}. @item what-if @samp{-W} in @code{make}. @item whole-size-limit @samp{-l} in @code{shar}. 
@item width @samp{-w} in @code{ls} and @code{ptx}. @item word-regexp @samp{-W} in @code{ptx}. @item writable @samp{-T} in @code{who}. @item zeros @samp{-z} in @code{gprof}. @end table @node OID Allocations @section OID Allocations @cindex OID allocations for GNU @cindex SNMP @cindex LDAP @cindex X.509 The OID (object identifier) 1.3.6.1.4.1.11591 has been assigned to the GNU Project (thanks to Sergey Poznyakoff). These are used for SNMP, LDAP, X.509 certificates, and so on. The web site @url{https://www.alvestrand.no/objectid} has a (voluntary) listing of many OID assignments. If you need a new slot for your GNU package, write @email{maintainers@@gnu.org}. Here is a list of arcs currently assigned: @example @include gnu-oids.texi @end example @node Memory Usage @section Memory Usage @cindex memory usage If a program typically uses just a few meg of memory, don't bother making any effort to reduce memory usage. For example, if it is impractical for other reasons to operate on files more than a few meg long, it is reasonable to read entire input files into memory to operate on them. However, for programs such as @code{cat} or @code{tail}, that can usefully operate on very large files, it is important to avoid using a technique that would artificially limit the size of files it can handle. If a program works by lines and could be applied to arbitrary user-supplied input files, it should keep only a line in memory, because this is not very hard and users will want to be able to operate on input files that are bigger than will fit in memory all at once. If your program creates complicated data structures, just make them in memory and give a fatal error if @code{malloc} returns @code{NULL}. @pindex valgrind @cindex memory leak Memory analysis tools such as @command{valgrind} can be useful, but don't complicate a program merely to avoid their false alarms. For example, if memory is used until just before a process exits, don't free it simply to silence such a tool. 
@node File Usage @section File Usage @cindex file usage Programs should be prepared to operate when @file{/usr} and @file{/etc} are read-only file systems. Thus, if the program manages log files, lock files, backup files, score files, or any other files which are modified for internal purposes, these files should not be stored in @file{/usr} or @file{/etc}. There are two exceptions. @file{/etc} is used to store system configuration information; it is reasonable for a program to modify files in @file{/etc} when its job is to update the system configuration. Also, if the user explicitly asks to modify one file in a directory, it is reasonable for the program to store other files in the same directory. @node Writing C @chapter Making The Best Use of C This chapter provides advice on how best to use the C language when writing GNU software. @menu * Formatting:: Formatting your source code. * Comments:: Commenting your work. * Syntactic Conventions:: Clean use of C constructs. * Names:: Naming variables, functions, and files. * System Portability:: Portability among different operating systems. * CPU Portability:: Supporting the range of CPU types. * System Functions:: Portability and ``standard'' library functions. * Internationalization:: Techniques for internationalization. * Character Set:: Use ASCII by default. * Quote Characters:: Use "..." or '...' in the C locale. * Mmap:: How you can safely use @code{mmap}. @end menu @node Formatting @section Formatting Your Source Code @cindex formatting source code @cindex line length @cindex length of source lines Please keep the length of source lines to 79 characters or less, for maximum readability in the widest range of environments. @cindex open brace @cindex braces, in C source @cindex function definitions, formatting It is important to put the open-brace that starts the body of a C function in column one, so that they will start a defun. 
Several tools look for open-braces in column one to find the beginnings of C functions. These tools will not work on code not formatted that way. Avoid putting open-brace, open-parenthesis or open-bracket in column one when they are inside a function, so that they won't start a defun. The open-brace that starts a @code{struct} body can go in column one if you find it useful to treat that definition as a defun. It is also important for function definitions to start the name of the function in column one. This helps people to search for function definitions, and may also help certain tools recognize them. Thus, using Standard C syntax, the format is this: @example static char * concat (char *s1, char *s2) @{ @dots{} @} @end example @noindent or, if you want to use traditional C syntax, format the definition like this: @example static char * concat (s1, s2) /* Name starts in column one here */ char *s1, *s2; @{ /* Open brace in column one here */ @dots{} @} @end example In Standard C, if the arguments don't fit nicely on one line, split it like this: @example int lots_of_args (int an_integer, long a_long, short a_short, double a_double, float a_float) @dots{} @end example @cindex @code{struct} types, formatting @cindex @code{enum} types, formatting For @code{struct} and @code{enum} types, likewise put the braces in column one, unless the whole contents fits on one line: @example struct foo @{ int a, b; @} @exdent @r{or} struct foo @{ int a, b; @} @end example The rest of this section gives our recommendations for other aspects of C formatting style, which is also the default style of the @code{indent} program in version 1.2 and newer. It corresponds to the options @smallexample -nbad -bap -nbc -bbo -bl -bli2 -bls -ncdb -nce -cp1 -cs -di2 -ndj -nfc1 -nfca -hnl -i2 -ip5 -lp -pcs -psl -nsc -nsob @end smallexample We don't think of these recommendations as requirements, because it causes no problems for users if two different programs have different formatting styles. 
But whatever style you use, please use it consistently, since a mixture of styles within one program tends to look ugly. If you are contributing changes to an existing program, please follow the style of that program. For the body of the function, our recommended style looks like this: @example if (x < foo (y, z)) haha = bar[4] + 5; else @{ while (z) @{ haha += foo (z, z); z--; @} return ++x + bar (); @} @end example @cindex spaces before open-paren We find it easier to read a program when it has spaces before the open-parentheses and after the commas. Especially after the commas. When you split an expression into multiple lines, split it before an operator, not after one. Here is the right way: @cindex expressions, splitting @example if (foo_this_is_long && bar > win (x, y, z) && remaining_condition) @end example Try to avoid having two operators of different precedence at the same level of indentation. For example, don't write this: @example mode = (inmode[j] == VOIDmode || GET_MODE_SIZE (outmode[j]) > GET_MODE_SIZE (inmode[j]) ? outmode[j] : inmode[j]); @end example Instead, use extra parentheses so that the indentation shows the nesting: @example mode = ((inmode[j] == VOIDmode || (GET_MODE_SIZE (outmode[j]) > GET_MODE_SIZE (inmode[j]))) ? outmode[j] : inmode[j]); @end example Insert extra parentheses so that Emacs will indent the code properly. For example, the following indentation looks nice if you do it by hand, @example v = rup->ru_utime.tv_sec*1000 + rup->ru_utime.tv_usec/1000 + rup->ru_stime.tv_sec*1000 + rup->ru_stime.tv_usec/1000; @end example @noindent but Emacs would alter it. 
Adding a set of parentheses produces something that looks equally nice, and which Emacs will preserve: @example v = (rup->ru_utime.tv_sec*1000 + rup->ru_utime.tv_usec/1000 + rup->ru_stime.tv_sec*1000 + rup->ru_stime.tv_usec/1000); @end example Format do-while statements like this: @example do @{ a = foo (a); @} while (a > 0); @end example @cindex formfeed @cindex control-L Please use formfeed characters (control-L) to divide the program into pages at logical places (but not within a function). It does not matter just how long the pages are, since they do not have to fit on a printed page. The formfeeds should appear alone on lines by themselves. @node Comments @section Commenting Your Work @cindex commenting Every program should start with a comment saying briefly what it is for. Example: @samp{fmt - filter for simple filling of text}. This comment should be at the top of the source file containing the @samp{main} function of the program. Also, please write a brief comment at the start of each source file, with the file name and a line or two about the overall purpose of the file. Please write the comments in a GNU program in English, because English is the one language that nearly all programmers in all countries can read. If you do not write English well, please write comments in English as well as you can, then ask other people to help rewrite them. If you can't write comments in English, please find someone to work with you and translate your comments into English. Please put a comment on each function saying what the function does, what sorts of arguments it gets, and what the possible values of arguments mean and are used for. It is not necessary to duplicate in words the meaning of the C argument declarations, if a C type is being used in its customary fashion. 
If there is anything nonstandard about its use (such as an argument of type @code{char *} which is really the address of the second character of a string, not the first), or any possible values that would not work the way one would expect (such as, that strings containing newlines are not guaranteed to work), be sure to say so. Also explain the significance of the return value, if there is one. Please put two spaces after the end of a sentence in your comments, so that the Emacs sentence commands will work. Also, please write complete sentences and capitalize the first word. If a lower-case identifier comes at the beginning of a sentence, don't capitalize it! Changing the spelling makes it a different identifier. If you don't like starting a sentence with a lower case letter, write the sentence differently (e.g., ``The identifier lower-case is @dots{}''). The comment on a function is much clearer if you use the argument names to speak about the argument values. The variable name itself should be lower case, but write it in upper case when you are speaking about the value rather than the variable itself. Thus, ``the inode number NODE_NUM'' rather than ``an inode''. There is usually no purpose in restating the name of the function in the comment before it, because readers can see that for themselves. There might be an exception when the comment is so long that the function itself would be off the bottom of the screen. There should be a comment on each static variable as well, like this: @example /* Nonzero means truncate lines in the display; zero means continue them. */ int truncate_lines; @end example @cindex conditionals, comments for @cindex @code{#endif}, commenting Every @samp{#endif} should have a comment, except in the case of short conditionals (just a few lines) that are not nested. The comment should state the condition of the conditional that is ending, @emph{including its sense}. 
@samp{#else} should have a comment describing the condition @emph{and sense} of the code that follows. For example: @example @group #ifdef foo @dots{} #else /* not foo */ @dots{} #endif /* not foo */ @end group @group #ifdef foo @dots{} #endif /* foo */ @end group @end example @noindent but, by contrast, write the comments this way for a @samp{#ifndef}: @example @group #ifndef foo @dots{} #else /* foo */ @dots{} #endif /* foo */ @end group @group #ifndef foo @dots{} #endif /* not foo */ @end group @end example @node Syntactic Conventions @section Clean Use of C Constructs @cindex syntactic conventions @cindex implicit @code{int} @cindex function argument, declaring Please explicitly declare the types of all objects. For example, you should explicitly declare all arguments to functions, and you should declare functions to return @code{int} rather than omitting the @code{int}. @cindex compiler warnings @cindex @samp{-Wall} compiler option Some programmers like to use the GCC @samp{-Wall} option, and change the code whenever it issues a warning. If you want to do this, then do. Other programmers prefer not to use @samp{-Wall}, because it gives warnings for valid and legitimate code which they do not want to change. If you want to do this, then do. The compiler should be your servant, not your master. @pindex clang @pindex lint Don't make the program ugly just to placate static analysis tools such as @command{lint}, @command{clang}, and GCC with extra warnings options such as @option{-Wconversion} and @option{-Wundef}. These tools can help find bugs and unclear code, but they can also generate so many false alarms that it hurts readability to silence them with unnecessary casts, wrappers, and other complications. For example, please don't insert casts to @code{void} or calls to do-nothing functions merely to pacify a lint checker. 
Declarations of external functions and functions to appear later in the source file should all go in one place near the beginning of the file (somewhere before the first function definition in the file), or else should go in a header file. Don't put @code{extern} declarations inside functions. @cindex temporary variables It used to be common practice to use the same local variables (with names like @code{tem}) over and over for different values within one function. Instead of doing this, it is better to declare a separate local variable for each distinct purpose, and give it a name which is meaningful. This not only makes programs easier to understand, it also facilitates optimization by good compilers. You can also move the declaration of each local variable into the smallest scope that includes all its uses. This makes the program even cleaner. Don't use local variables or parameters that shadow global identifiers. GCC's @samp{-Wshadow} option can detect this problem. @cindex multiple variables in a line Don't declare multiple variables in one declaration that spans lines. Start a new declaration on each line, instead. For example, instead of this: @example @group int foo, bar; @end group @end example @noindent write either this: @example int foo, bar; @end example @noindent or this: @example int foo; int bar; @end example @noindent (If they are global variables, each should have a comment preceding it anyway.) When you have an @code{if}-@code{else} statement nested in another @code{if} statement, always put braces around the @code{if}-@code{else}. 
Thus, never write like this: @example if (foo) if (bar) win (); else lose (); @end example @noindent always like this: @example if (foo) @{ if (bar) win (); else lose (); @} @end example If you have an @code{if} statement nested inside of an @code{else} statement, either write @code{else if} on one line, like this, @example if (foo) @dots{} else if (bar) @dots{} @end example @noindent with its @code{then}-part indented like the preceding @code{then}-part, or write the nested @code{if} within braces like this: @example if (foo) @dots{} else @{ if (bar) @dots{} @} @end example Don't declare both a structure tag and variables or typedefs in the same declaration. Instead, declare the structure tag separately and then use it to declare the variables or typedefs. Try to avoid assignments inside @code{if}-conditions (assignments inside @code{while}-conditions are ok). For example, don't write this: @example if ((foo = (char *) malloc (sizeof *foo)) == NULL) fatal ("virtual memory exhausted"); @end example @noindent instead, write this: @example foo = (char *) malloc (sizeof *foo); if (foo == NULL) fatal ("virtual memory exhausted"); @end example @node Names @section Naming Variables, Functions, and Files @cindex names of variables, functions, and files The names of global variables and functions in a program serve as comments of a sort. So don't choose terse names---instead, look for names that give useful information about the meaning of the variable or function. In a GNU program, names should be English, like other comments. Local variable names can be shorter, because they are used only within one context, where (presumably) comments explain their purpose. Try to limit your use of abbreviations in symbol names. It is ok to make a few abbreviations, explain what they mean, and then use them frequently, but don't use lots of obscure abbreviations. Please use underscores to separate words in a name, so that the Emacs word commands can be useful within them. 
Stick to lower case; reserve upper case for macros and @code{enum} constants, and for name-prefixes that follow a uniform convention. For example, you should use names like @code{ignore_space_change_flag}; don't use names like @code{iCantReadThis}. Variables that indicate whether command-line options have been specified should be named after the meaning of the option, not after the option-letter. A comment should state both the exact meaning of the option and its letter. For example, @example @group /* Ignore changes in horizontal whitespace (-b). */ int ignore_space_change_flag; @end group @end example When you want to define names with constant integer values, use @code{enum} rather than @samp{#define}. GDB knows about enumeration constants. @cindex file-name limitations @pindex doschk You might want to make sure that none of the file names would conflict if the files were loaded onto an MS-DOS file system which shortens the names. You can use the program @code{doschk} to test for this. Some GNU programs were designed to limit themselves to file names of 14 characters or less, to avoid file name conflicts if they are read into older System V systems. Please preserve this feature in the existing GNU programs that have it, but there is no need to do this in new GNU programs. @code{doschk} also reports file names longer than 14 characters. @node System Portability @section Portability between System Types @cindex portability, between system types In the Unix world, ``portability'' refers to porting to different Unix versions. For a GNU program, this kind of portability is desirable, but not paramount. The primary purpose of GNU software is to run on top of the GNU kernel, compiled with the GNU C compiler, on various types of CPU@. So the kinds of portability that are absolutely necessary are quite limited. But it is important to support Linux-based GNU systems, since they are the form of GNU that is popular. 
Beyond that, it is good to support the other free operating systems (*BSD), and it is nice to support other Unix-like systems if you want to. Supporting a variety of Unix-like systems is desirable, although not paramount. It is usually not too hard, so you may as well do it. But you don't have to consider it an obligation, if it does turn out to be hard. @pindex autoconf The easiest way to achieve portability to most Unix-like systems is to use Autoconf. It's unlikely that your program needs to know more information about the host platform than Autoconf can provide, simply because most of the programs that need such knowledge have already been written. Avoid using the format of semi-internal data bases (e.g., directories) when there is a higher-level alternative (@code{readdir}). @cindex non-POSIX systems, and portability As for systems that are not like Unix, such as MS-DOS, Windows, VMS, MVS, and older Macintosh systems, supporting them is often a lot of work. When that is the case, it is better to spend your time adding features that will be useful on GNU and GNU/Linux, rather than on supporting other incompatible systems. If you do support Windows, please do not abbreviate it as ``win''. @xref{Trademarks}. Usually we write the name ``Windows'' in full, but when brevity is very important (as in file names and some symbol names), we abbreviate it to ``w''. In GNU Emacs, for instance, we use @samp{w32} in file names of Windows-specific files, but the macro for Windows conditionals is called @code{WINDOWSNT}. In principle there could also be @samp{w64}. It is a good idea to define the ``feature test macro'' @code{_GNU_SOURCE} when compiling your C files. When you compile on GNU or GNU/Linux, this will enable the declarations of GNU library extension functions, and that will usually give you a compiler error message if you define the same function names in some other way in your program. 
(You don't have to actually @emph{use} these functions, if you prefer to make the program more portable to other systems.) But whether or not you use these GNU extensions, you should avoid using their names for any other meanings. Doing so would make it hard to move your code into other GNU programs. @node CPU Portability @section Portability between CPUs @cindex data types, and portability @cindex portability, and data types Even GNU systems will differ because of differences among CPU types---for example, difference in byte ordering and alignment requirements. It is absolutely essential to handle these differences. However, don't make any effort to cater to the possibility that an @code{int} will be less than 32 bits. We don't support 16-bit machines in GNU. You need not cater to the possibility that @code{long} will be smaller than pointers and @code{size_t}. We know of one such platform: 64-bit programs on Microsoft Windows. If you care about making your package run on Windows using Mingw64, you would need to deal with 8-byte pointers and 4-byte @code{long}, which would break this code: @example printf ("size = %lu\n", (unsigned long) sizeof array); printf ("diff = %ld\n", (long) (pointer2 - pointer1)); @end example Whether to support Mingw64, and Windows in general, in your package is your choice. The GNU Project doesn't say you have any responsibility to do so. Our goal is to replace proprietary systems, including Windows, not to enhance them. If people pressure you to make your program run on Windows, and you are not interested, you can respond with, ``Switch to GNU/Linux --- your freedom depends on it.'' Predefined file-size types like @code{off_t} are an exception: they are longer than @code{long} on many platforms, so code like the above won't work with them. One way to print an @code{off_t} value portably is to print its digits yourself, one by one. Don't assume that the address of an @code{int} object is also the address of its least-significant byte. 
This is false on big-endian machines. Thus, don't make the following mistake: @example int c; @dots{} while ((c = getchar ()) != EOF) write (file_descriptor, &c, 1); @end example @noindent Instead, use @code{unsigned char} as follows. (The @code{unsigned} is for portability to unusual systems where @code{char} is signed and where there is integer overflow checking.) @example int c; while ((c = getchar ()) != EOF) @{ unsigned char u = c; write (file_descriptor, &u, 1); @} @end example @cindex casting pointers to integers Avoid casting pointers to integers if you can. Such casts greatly reduce portability, and in most programs they are easy to avoid. In the cases where casting pointers to integers is essential---such as, a Lisp interpreter which stores type information as well as an address in one word---you'll have to make explicit provisions to handle different word sizes. You will also need to make provision for systems in which the normal range of addresses you can get from @code{malloc} starts far away from zero. @node System Functions @section Calling System Functions @cindex C library functions, and portability @cindex POSIX functions, and portability @cindex library functions, and portability @cindex portability, and library functions Historically, C implementations differed substantially, and many systems lacked a full implementation of ANSI/ISO C89. Nowadays, however, all practical systems have a C89 compiler and GNU C supports almost all of C99 and some of C11. Similarly, most systems implement POSIX.1-2001 libraries and tools, and many have POSIX.1-2008. Hence, there is little reason to support old C or non-POSIX systems, and you may want to take advantage of standard C and POSIX to write clearer, more portable, or faster code. You should use standard interfaces where possible; but if GNU extensions make your program more maintainable, powerful, or otherwise better, don't hesitate to use them. 
In any case, don't make your own declaration of system functions; that's a recipe for conflict. Despite the standards, nearly every library function has some sort of portability issue on some system or another. Here are some examples: @table @code @item open Names with trailing @code{/}'s are mishandled on many platforms. @item printf @code{long double} may be unimplemented; floating values Infinity and NaN are often mishandled; output for large precisions may be incorrect. @item readlink May return @code{int} instead of @code{ssize_t}. @item scanf On Windows, @code{errno} is not set on failure. @end table @cindex Gnulib @uref{https://www.gnu.org/software/gnulib/, Gnulib} is a big help in this regard. Gnulib provides implementations of standard interfaces on many of the systems that lack them, including portable implementations of enhanced GNU interfaces, thereby making their use portable, and of POSIX-1.2008 interfaces, some of which are missing even on up-to-date GNU systems. @findex xmalloc, in Gnulib @findex error messages, in Gnulib @findex data structures, in Gnulib Gnulib also provides many useful non-standard interfaces; for example, C implementations of standard data structures (hash tables, binary trees), error-checking type-safe wrappers for memory allocation functions (@code{xmalloc}, @code{xrealloc}), and output of error messages. Gnulib integrates with GNU Autoconf and Automake to remove much of the burden of writing portable code from the programmer: Gnulib makes your configure script automatically determine what features are missing and use the Gnulib code to supply the missing pieces. The Gnulib and Autoconf manuals have extensive sections on portability: @ref{Top,, Introduction, gnulib, Gnulib} and @pxref{Portable C and C++,,, autoconf, Autoconf}. Please consult them for many more details. 
@node Internationalization @section Internationalization @cindex internationalization @pindex gettext GNU has a library called GNU gettext that makes it easy to translate the messages in a program into various languages. You should use this library in every program. Use English for the messages as they appear in the program, and let gettext provide the way to translate them into other languages. Using GNU gettext involves putting a call to the @code{gettext} macro around each string that might need translation---like this: @example printf (gettext ("Processing file '%s'..."), file); @end example @noindent This permits GNU gettext to replace the string @code{"Processing file '%s'..."} with a translated version. Once a program uses gettext, please make a point of writing calls to @code{gettext} when you add new strings that call for translation. Using GNU gettext in a package involves specifying a @dfn{text domain name} for the package. The text domain name is used to separate the translations for this package from the translations for other packages. Normally, the text domain name should be the same as the name of the package---for example, @samp{coreutils} for the GNU core utilities. @cindex message text, and internationalization To enable gettext to work well, avoid writing code that makes assumptions about the structure of words or sentences. When you want the precise text of a sentence to vary depending on the data, use two or more alternative string constants each containing a complete sentences, rather than inserting conditionalized words or phrases into a single sentence framework. Here is an example of what not to do: @smallexample printf ("%s is full", capacity > 5000000 ? "disk" : "floppy disk"); @end smallexample If you apply gettext to all strings, like this, @smallexample printf (gettext ("%s is full"), capacity > 5000000 ? 
gettext ("disk") : gettext ("floppy disk")); @end smallexample @noindent the translator will hardly know that "disk" and "floppy disk" are meant to be substituted in the other string. Worse, in some languages (like French) the construction will not work: the translation of the word "full" depends on the gender of the first part of the sentence; it happens to be not the same for "disk" as for "floppy disk". Complete sentences can be translated without problems: @example printf (capacity > 5000000 ? gettext ("disk is full") : gettext ("floppy disk is full")); @end example A similar problem appears at the level of sentence structure with this code: @example printf ("# Implicit rule search has%s been done.\n", f->tried_implicit ? "" : " not"); @end example @noindent Adding @code{gettext} calls to this code cannot give correct results for all languages, because negation in some languages requires adding words at more than one place in the sentence. By contrast, adding @code{gettext} calls does the job straightforwardly if the code starts out like this: @example printf (f->tried_implicit ? "# Implicit rule search has been done.\n", : "# Implicit rule search has not been done.\n"); @end example Another example is this one: @example printf ("%d file%s processed", nfiles, nfiles != 1 ? "s" : ""); @end example @noindent The problem with this example is that it assumes that plurals are made by adding `s'. If you apply gettext to the format string, like this, @example printf (gettext ("%d file%s processed"), nfiles, nfiles != 1 ? "s" : ""); @end example @noindent the message can use different words, but it will still be forced to use `s' for the plural. Here is a better way, with gettext being applied to the two strings independently: @example printf ((nfiles != 1 ? 
gettext ("%d files processed") : gettext ("%d file processed")), nfiles); @end example @noindent But this still doesn't work for languages like Polish, which has three plural forms: one for nfiles == 1, one for nfiles == 2, 3, 4, 22, 23, 24, ... and one for the rest. The GNU @code{ngettext} function solves this problem: @example printf (ngettext ("%d files processed", "%d file processed", nfiles), nfiles); @end example @node Character Set @section Character Set @cindex character set @cindex encodings @cindex ASCII characters @cindex non-ASCII characters Sticking to the ASCII character set (plain text, 7-bit characters) is preferred in GNU source code comments, text documents, and other contexts, unless there is good reason to do something else because of the application domain. For example, if source code deals with the French Revolutionary calendar, it is OK if its literal strings contain accented characters in month names like ``Flor@'eal''. Also, it is OK (but not required) to use non-ASCII characters to represent proper names of contributors in change logs (@pxref{Change Logs}). If you need to use non-ASCII characters, you should normally stick with one encoding, certainly within a single file. UTF-8 is likely to be the best choice. @node Quote Characters @section Quote Characters @cindex quote characters @cindex locale-specific quote characters @cindex left quote @cindex right quote @cindex opening quote @cindex single quote @cindex double quote @cindex grave accent @set txicodequoteundirected @set txicodequotebacktick In the C locale, the output of GNU programs should stick to plain ASCII for quotation characters in messages to users: preferably 0x22 (@samp{"}) or 0x27 (@samp{'}) for both opening and closing quotes. 
Although GNU programs traditionally used 0x60 (@samp{`}) for opening and 0x27 (@samp{'}) for closing quotes, nowadays quotes @samp{`like this'} are typically rendered asymmetrically, so quoting @samp{"like this"} or @samp{'like this'} typically looks better. It is ok, but not required, for GNU programs to generate locale-specific quotes in non-C locales. For example: @example printf (gettext ("Processing file '%s'..."), file); @end example @noindent Here, a French translation might cause @code{gettext} to return the string @code{"Traitement de fichier @guilsinglleft{}@tie{}%s@tie{}@guilsinglright{}..."}, yielding quotes more appropriate for a French locale. Sometimes a program may need to use opening and closing quotes directly. By convention, @code{gettext} translates the string @samp{"`"} to the opening quote and the string @samp{"'"} to the closing quote, and a program can use these translations. Generally, though, it is better to translate quote characters in the context of longer strings. If the output of your program is ever likely to be parsed by another program, it is good to provide an option that makes this parsing reliable. For example, you could escape special characters using conventions from the C language or the Bourne shell. See for example the option @option{--quoting-style} of GNU @code{ls}. @clear txicodequoteundirected @clear txicodequotebacktick @node Mmap @section Mmap @findex mmap If you use @code{mmap} to read or write files, don't assume it either works on all files or fails for all files. It may work on some files and fail on others. The proper way to use @code{mmap} is to try it on the specific file for which you want to use it---and if @code{mmap} doesn't work, fall back on doing the job in another way using @code{read} and @code{write}. The reason this precaution is needed is that the GNU kernel (the HURD) provides a user-extensible file system, in which there can be many different kinds of ``ordinary files''. 
Many of them support @code{mmap}, but some do not. It is important to make programs handle all these kinds of files. @node Documentation @chapter Documenting Programs @cindex documentation A GNU program should ideally come with full free documentation, adequate for both reference and tutorial purposes. If the package can be programmed or extended, the documentation should cover programming or extending it, as well as just using it. @menu * GNU Manuals:: Writing proper manuals. * Doc Strings and Manuals:: Compiling doc strings doesn't make a manual. * Manual Structure Details:: Specific structure conventions. * License for Manuals:: Writing the distribution terms for a manual. * Manual Credits:: Giving credit to documentation contributors. * Printed Manuals:: Mentioning the printed manual. * NEWS File:: NEWS files supplement manuals. * Change Logs:: Recording changes. * Man Pages:: Man pages are secondary. * Reading other Manuals:: How far you can go in learning from other manuals. @end menu @node GNU Manuals @section GNU Manuals The preferred document format for the GNU system is the Texinfo formatting language. Every GNU package should (ideally) have documentation in Texinfo both for reference and for learners. Texinfo makes it possible to produce a good quality formatted book, using @TeX{}, and to generate an Info file. It is also possible to generate HTML output from Texinfo source. See the Texinfo manual, either the hardcopy, or the on-line version available through @code{info} or the Emacs Info subsystem (@kbd{C-h i}). Nowadays some other formats such as Docbook and Sgmltexi can be converted automatically into Texinfo. It is ok to produce the Texinfo documentation by conversion this way, as long as it gives good results. Make sure your manual is clear to a reader who knows nothing about the topic and reads it straight through. This means covering basic topics at the beginning, and advanced topics only later. 
This also means defining every specialized term when it is first used. Remember that the audience for a GNU manual (and other GNU documentation) is global, and that it will be used for years, maybe decades. This means that the reader could have very different cultural reference points. Decades from now, all but old folks will have very different cultural reference points; many things that "everyone knows about" today may be mostly forgotten. For this reason, try to avoid writing in a way that depends on cultural reference points for proper understanding, or that refers to them in ways that would impede reading for someone that doesn't recognize them. Likewise, be conservative in your choice of words (aside from technical terms), linguistic constructs, and spelling: aim to make them intelligible to readers from ten years ago. In any contest for trendiness, GNU writing should not even qualify to enter. It is ok to refer once in a rare while to spatially or temporally localized reference points or facts, if it is directly pertinent or as an aside. Changing these few things (which in any case stand out) when they no longer make sense will not be a lot of work. By contrast, it is always proper to refer to concepts of GNU and the free software movement, when they are pertinent. These are a central part of our message, so we should take advantage of opportunities to mention them. They are fundamental moral positions, so they will rarely if ever change. Programmers tend to carry over the structure of the program as the structure for its documentation. But this structure is not necessarily good for explaining how to use the program; it may be irrelevant and confusing for a user. Instead, the right way to structure documentation is according to the concepts and questions that a user will have in mind when reading it. This principle applies at every level, from the lowest (ordering sentences in a paragraph) to the highest (ordering of chapter topics within the manual). 
Sometimes this structure of ideas matches the structure of the implementation of the software being documented---but often they are different. An important part of learning to write good documentation is to learn to notice when you have unthinkingly structured the documentation like the implementation, stop yourself, and look for better alternatives. For example, each program in the GNU system probably ought to be documented in one manual; but this does not mean each program should have its own manual. That would be following the structure of the implementation, rather than the structure that helps the user understand. Instead, each manual should cover a coherent @emph{topic}. For example, instead of a manual for @code{diff} and a manual for @code{diff3}, we have one manual for ``comparison of files'' which covers both of those programs, as well as @code{cmp}. By documenting these programs together, we can make the whole subject clearer. The manual which discusses a program should certainly document all of the program's command-line options and all of its commands. It should give examples of their use. But don't organize the manual as a list of features. Instead, organize it logically, by subtopics. Address the questions that a user will ask when thinking about the job that the program does. Don't just tell the reader what each feature can do---say what jobs it is good for, and show how to use it for those jobs. Explain what is recommended usage, and what kinds of usage users should avoid. In general, a GNU manual should serve both as tutorial and reference. It should be set up for convenient access to each topic through Info, and for reading straight through (appendixes aside). A GNU manual should give a good introduction to a beginner reading through from the start, and should also provide all the details that hackers want. The Bison manual is a good example of this---please take a look at it to see what we mean. That is not as hard as it first sounds. 
Arrange each chapter as a logical breakdown of its topic, but order the sections, and write their text, so that reading the chapter straight through makes sense. Do likewise when structuring the book into chapters, and when structuring a section into paragraphs. The watchword is, @emph{at each point, address the most fundamental and important issue raised by the preceding text.} If necessary, add extra chapters at the beginning of the manual which are purely tutorial and cover the basics of the subject. These provide the framework for a beginner to understand the rest of the manual. The Bison manual provides a good example of how to do this. To serve as a reference, a manual should have an Index that lists all the functions, variables, options, and important concepts that are part of the program. One combined Index should do for a short manual, but sometimes for a complex package it is better to use multiple indices. The Texinfo manual includes advice on preparing good index entries, see @ref{Index Entries, , Making Index Entries, texinfo, GNU Texinfo}, and see @ref{Indexing Commands, , Defining the Entries of an Index, texinfo, GNU Texinfo}. Don't use Unix man pages as a model for how to write GNU documentation; most of them are terse, badly structured, and give inadequate explanation of the underlying concepts. (There are, of course, some exceptions.) Also, Unix man pages use a particular format which is different from what we use in GNU manuals. Please include an email address in the manual for where to report bugs @emph{in the text of the manual}. Please do not use the term ``pathname'' that is used in Unix documentation; use ``file name'' (two words) instead. We use the term ``path'' only for search paths, which are lists of directory names. Please do not use the term ``illegal'' to refer to erroneous input to a computer program. Please use ``invalid'' for this, and reserve the term ``illegal'' for activities prohibited by law. 
Please do not write @samp{()} after a function name just to indicate it is a function. @code{foo ()} is not a function, it is a function call with no arguments. Whenever possible, please stick to the active voice, avoiding the passive, and use the present tense, not the future tense. For instance, write ``The function @code{foo} returns a list containing @var{a} and @var{b}'' rather than ``A list containing @var{a} and @var{b} will be returned.'' One advantage of the active voice is it requires you to state the subject of the sentence; with the passive voice, you might omit the subject, which leads to vagueness. It is proper to use the future tense when grammar demands it, as in, ``If you type @kbd{x}, the computer will self-destruct in 10 seconds.'' @node Doc Strings and Manuals @section Doc Strings and Manuals Some programming systems, such as Emacs, provide a documentation string for each function, command or variable. You may be tempted to write a reference manual by compiling the documentation strings and writing a little additional text to go around them---but you must not do it. That approach is a fundamental mistake. The text of well-written documentation strings will be entirely wrong for a manual. A documentation string needs to stand alone---when it appears on the screen, there will be no other text to introduce or explain it. Meanwhile, it can be rather informal in style. The text describing a function or variable in a manual must not stand alone; it appears in the context of a section or subsection. Other text at the beginning of the section should explain some of the concepts, and should often make some general points that apply to several functions or variables. The previous descriptions of functions and variables in the section will also have given information about the topic. A description written to stand alone would repeat some of that information; this redundancy looks bad. 
Meanwhile, the informality that is acceptable in a documentation string is totally unacceptable in a manual. The only good way to use documentation strings in writing a good manual is to use them as a source of information for writing good text. @node Manual Structure Details @section Manual Structure Details @cindex manual structure The title page of the manual should state the version of the programs or packages documented in the manual. The Top node of the manual should also contain this information. If the manual is changing more frequently than or independent of the program, also state a version number for the manual in both of these places. Each program documented in the manual should have a node named @samp{@var{program} Invocation} or @samp{Invoking @var{program}}. This node (together with its subnodes, if any) should describe the program's command line arguments and how to run it (the sort of information people would look for in a man page). Start with an @samp{@@example} containing a template for all the options and arguments that the program uses. Alternatively, put a menu item in some menu whose item name fits one of the above patterns. This identifies the node which that item points to as the node for this purpose, regardless of the node's actual name. The @samp{--usage} feature of the Info reader looks for such a node or menu item in order to find the relevant text, so it is essential for every Texinfo file to have one. If one manual describes several programs, it should have such a node for each program described in the manual. @node License for Manuals @section License for Manuals @cindex license for manuals Please use the GNU Free Documentation License for all GNU manuals that are more than a few pages long. Likewise for a collection of short documents---you only need one copy of the GNU FDL for the whole collection. For a single short document, you can use a very permissive non-copyleft license, to avoid taking up space with a long license. 
See @uref{https://www.gnu.org/copyleft/fdl-howto.html} for more explanation of how to employ the GFDL. Note that it is not obligatory to include a copy of the GNU GPL or GNU LGPL in a manual whose license is neither the GPL nor the LGPL@. It can be a good idea to include the program's license in a large manual; in a short manual, whose size would be increased considerably by including the program's license, it is probably better not to include it. @node Manual Credits @section Manual Credits @cindex credits for manuals Please credit the principal human writers of the manual as the authors, on the title page of the manual. If a company sponsored the work, thank the company in a suitable place in the manual, but do not cite the company as an author. @node Printed Manuals @section Printed Manuals The FSF publishes some GNU manuals in printed form. To encourage sales of these manuals, the on-line versions of the manual should mention at the very start that the printed manual is available and should point at information for getting it---for instance, with a link to the page @url{https://www.gnu.org/order/order.html}. This should not be included in the printed manual, though, because there it is redundant. It is also useful to explain in the on-line forms of the manual how the user can print out the manual from the sources. @node NEWS File @section The NEWS File @cindex @file{NEWS} file In addition to its manual, the package should have a file named @file{NEWS} which contains a list of user-visible changes worth mentioning. In each new release, add items to the front of the file and identify the version they pertain to. Don't discard old items; leave them in the file after the newer items. This way, a user upgrading from any previous version can see what is new. If the @file{NEWS} file gets very long, move some of the older items into a file named @file{ONEWS} and put a note at the end referring the user to that file. 
@node Change Logs @section Change Logs @cindex change logs Keep a change log to describe all the changes made to program source files. The purpose of this is so that people investigating bugs in the future will know about the changes that might have introduced the bug. Often a new bug can be found by looking at what was recently changed. More importantly, change logs can help you eliminate conceptual inconsistencies between different parts of a program, by giving you a history of how the conflicting concepts arose, who they came from, and why the conflicting changes were made. @cindex software forensics, and change logs Therefore, change logs should be detailed enough and accurate enough to provide the information commonly required for such @dfn{software forensics}. Specifically, change logs should make finding answers to the following questions easy: @itemize @bullet @item What changes affected a particular source file? @item Was a particular source file renamed or moved, and if so, as part of what change? @item What changes affected a given function or macro or definition of a data structure? @item Was a function (or a macro or the definition of a data structure) renamed or moved from another file, and if so, as part of which change? @item What changes deleted a function (or macro or data structure)? @item What was the rationale for a given change, and what were its main ideas? @item Is there any additional information regarding the change, and if so, where can it be found? @end itemize @cindex VCS @cindex version control system, for keeping change logs Historically, change logs were maintained on specially formatted files. Nowadays, projects commonly keep their source files under a @dfn{version control system} (VCS), such as Git, Subversion, or Mercurial. 
If the VCS repository is publicly accessible, and changes are committed to it separately (one commit for each logical changeset) and record the authors of each change, then the information recorded by the VCS can be used to produce the change logs out of VCS logs, and to answer the above questions by using the suitable VCS commands. (However, the VCS log messages still need to provide some supporting information, as described below.) Projects that maintain such VCS repositories can decide not to maintain separate change log files, and instead rely on the VCS to keep the change logs. If you decide not to maintain separate change log files, you should still consider providing them in the release tarballs, for the benefit of users who'd like to review the change logs without accessing the project's VCS repository. Scripts exist that can produce @file{ChangeLog} files from the VCS logs; for example, the @file{gitlog-to-changelog} script, which is part of Gnulib, can do that for Git repositories. In Emacs, the command @kbd{C-x v a} (@code{vc-update-change-log}) does the job of incrementally updating a @file{ChangeLog} file from the VCS logs. If separate change log files @emph{are} maintained, they are normally called @file{ChangeLog}, and each such file covers an entire directory. Each directory can have its own change log file, or a directory can use the change log of its parent directory---it's up to you. @menu * Change Log Concepts:: * Style of Change Logs:: * Simple Changes:: * Conditional Changes:: * Indicating the Part Changed:: @end menu @node Change Log Concepts @subsection Change Log Concepts and Conventions @cindex changeset, in a change log @cindex batch of changes, in a change log You can think of the change log as a conceptual ``undo list'' which states how earlier versions were different from the current version. People can see the current version; they don't need the change log to tell them what is in it. 
What they want from a change log is a clear explanation of how the earlier version differed. Each @dfn{entry} in a change log describes either an individual change or the smallest batch of changes that belong together, also known as a @dfn{changeset}. @cindex title, change log entry @cindex header line, change log entry It is a good idea to start the change log entry with a @dfn{header line}: a single line that is a complete sentence which summarizes the changeset. If you keep the change log in a VCS, this should be a requirement, as VCS commands that show the change log in abbreviated form, such as @kbd{git log --oneline}, treat the header line specially. (In a @file{ChangeLog} file, the header line follows a line that says who was the author of the change and when it was installed.) @cindex description, change log entry Follow the change log entry's header line with a description of the overall change. This should be as long as needed to give a clear description. Pay special attention to aspects of the changeset not easily gleaned from the diffs or from the names of modified files and functions: the overall idea of the change and the need for it, and the relations, if any, between changes made to different files/functions. If the change or its reasons were discussed on some public forum, such as the project's issue tracker or mailing list, it is a good idea to summarize the main points of that discussion in the change's description, and include a pointer to that discussion or the issue ID for those who'd like to read it in full. The best place to explain how parts of the new code work with other code is in comments in the code, not in the change log. If you think that a change calls for explanation of @emph{why} the change was needed---that is, what problem the old code had such that it required this change---you're probably right. Please put the explanation in comments in the code, where people will see it whenever they see the code. 
An example of such an explanation is, ``This function used to be iterative, but that failed when MUMBLE was a tree.'' (Though such a simple reason would not need this kind of explanation.) The best place for other kinds of explanation of the change is in the change log entry. In particular, comments usually will not say why some code was deleted or moved to another place---that belongs to the description of the change which did that. Following the free-text description of the change, it is a good idea to give a list of names of the entities or definitions that you changed, according to the files they are in, and what was changed in each one. @xref{Style of Change Logs}. If a project uses a modern VCS to keep the change log information, as described in @ref{Change Logs}, explicitly listing the files and functions that were changed is not strictly necessary, and in some cases (like identical mechanical changes in many places) even tedious. It is up to you to decide whether to allow your project's developers to omit the list of changed files and functions from the log entries, and whether to allow such omissions under some specific conditions. However, while making this decision, please consider the following benefits of providing the list of changed entities with each change: @itemize @bullet @item Generation of useful @file{ChangeLog} files from VCS logs becomes more difficult if the change log entries don't list the modified functions/macros, because VCS commands cannot reliably reproduce their names from the commit information alone. For example, when there is a change in the header part of a function definition, the heading of the diff hunk as shown in the VCS log commands will name the wrong function as being modified (usually, the function defined before the one being modified), so using those diffs to glean the names of the modified functions will produce inaccurate results. 
You will need to use specialized scripts, such as gnulib's @file{vcs-to-changelog.py}, mentioned below, to solve these difficulties, and make sure they support the source languages used by your project. @item While modern VCS commands, such as Git's @kbd{git log -L} and @kbd{git log -G}, provide powerful means for finding changes that affected a certain function or macro or data structure (and thus might make @file{ChangeLog} files unnecessary if you have the repository available), they can sometimes fail. For example, @kbd{git log -L} doesn't support the syntax of some programming languages out of the box. Mentioning the modified functions/macros explicitly allows finding the related changes simply and reliably. @item Some VCS commands have difficulties or limitations when tracking changes across file moves or renames. Again, if the entities are mentioned explicitly, those difficulties can be overcome. @item Users that review changes using the generated @file{ChangeLog} files may not have the repository and the VCS commands available to them. Naming the modified entities alleviates that problem. @end itemize @noindent For these reasons, providing lists of modified files and functions with each change makes the change logs more useful, and we therefore recommend including them whenever possible and practical. It is also possible to generate the lists naming the modified entities by running a script. One such script is @file{mklog.py} (written in Python 3); it is used by the @code{GCC} project. Gnulib provides another variant of such a script, called @file{vcs-to-changelog.py}, part of the @code{vcs-to-changelog} module. Note that these scripts currently support fewer programming languages than the manual commands provided by Emacs (@pxref{Style of Change Logs}).
Therefore, the above mentioned method of generating the @code{ChangeLog} file from the VCS commit history, for instance via the @code{gitlog-to-changelog} script, usually gives better results---provided that the contributors stick to providing good commit messages. @node Style of Change Logs @subsection Style of Change Logs @cindex change logs, style Here are some simple examples of change log entries, starting with the header line that says who made the change and when it was installed, followed by descriptions of specific changes. (These examples are drawn from Emacs.) Keep in mind that the line which shows the date of the change and the author's name and email address is needed only in a separate @file{ChangeLog} file, not when the change logs are kept in a VCS. @example 2019-08-29 Noam Postavsky <npostavs@@gmail.com> Handle completely undecoded input in term (Bug#29918) * lisp/term.el (term-emulate-terminal): Avoid errors if the whole decoded string is eight-bit characters. Don't attempt to save the string for next iteration in that case. * test/lisp/term-tests.el (term-decode-partial) (term-undecodable-input): New tests. 2019-06-15 Paul Eggert <eggert@@cs.ucla.edu> Port to platforms where tputs is in libtinfow * configure.ac (tputs_library): Also try tinfow, ncursesw (Bug#33977). 2019-02-08 Eli Zaretskii <eliz@@gnu.org> Improve documentation of 'date-to-time' and 'parse-time-string' * doc/lispref/os.texi (Time Parsing): Document 'parse-time-string', and refer to it for the description of the argument of 'date-to-time'. * lisp/calendar/time-date.el (date-to-time): Refer in the doc string to 'parse-time-string' for more information about the format of the DATE argument. (Bug#34303) @end example If you mention the names of the modified functions or variables, it's important to name them in full. Don't abbreviate function or variable names, and don't combine them. 
Subsequent maintainers will often search for a function name to find all the change log entries that pertain to it; if you abbreviate the name, they won't find it when they search. For example, some people are tempted to abbreviate groups of function names by writing @samp{* register.el (@{insert,jump-to@}-register)}; this is not a good idea, since searching for @code{jump-to-register} or @code{insert-register} would not find that entry. Separate unrelated change log entries with blank lines. Don't put blank lines between individual changes of an entry. You can omit the file name and the asterisk when successive individual changes are in the same file. Break long lists of function names by closing continued lines with @samp{)}, rather than @samp{,}, and opening the continuation with @samp{(}. This makes highlighting in Emacs work better. Here is an example: @example * src/keyboard.c (menu_bar_items, tool_bar_items) (Fexecute_extended_command): Deal with 'keymap' property. @end example The easiest way to add an entry to @file{ChangeLog} is with the Emacs command @kbd{M-x add-change-log-entry}, or its variant @kbd{C-x 4 a} (@code{add-change-log-entry-other-window}). This automatically collects the name of the changed file and the changed function or variable, and formats a change log entry according to the conventions described above, leaving it up to you to describe the changes you made to that function or variable. When you install someone else's changes, put the contributor's name in the change log entry rather than in the text of the entry. In other words, write this: @example 2002-07-14 John Doe <jdoe@@gnu.org> * sewing.c: Make it sew. @end example @noindent rather than this: @example 2002-07-14 Usual Maintainer <usual@@gnu.org> * sewing.c: Make it sew. Patch by jdoe@@gnu.org. @end example When committing someone else's changes into a VCS, use the VCS features to specify the author. For example, with Git, use @kbd{git commit --author=@var{author}}. 
As for the date, that should be the date you applied the change. (With a VCS, use the appropriate command-line switches, e.g., @kbd{git commit --date=@var{date}}.) Modern VCS have commands to apply changes sent via email (e.g., Git has @kbd{git am}); in that case the author of the changeset and the date it was made will be automatically gleaned from the email message and recorded in the repository. If the patches are prepared with suitable VCS commands, such as @kbd{git format-patch}, the email message body will also have the original author of the changeset, so resending or forwarding the message will not interfere with attributing the changes to their author. Thus, we recommend that you request your contributors to use commands such as @kbd{git format-patch} to prepare the patches. @node Simple Changes @subsection Simple Changes Certain simple kinds of changes don't need much detail in the change log. If the description of the change is short enough, it can serve as its own header line: @example 2019-08-29 Eli Zaretskii <eliz@@gnu.org> * lisp/simple.el (kill-do-not-save-duplicates): Doc fix. (Bug#36827) @end example When you change the calling sequence of a function in a simple fashion, and you change all the callers of the function to use the new calling sequence, there is no need to make individual entries for all the callers that you changed. Just write in the entry for the function being called, ``All callers changed''---like this: @example * keyboard.c (Fcommand_execute): New arg SPECIAL. All callers changed. @end example When you change just comments or doc strings, it is enough to write an entry for the file, without mentioning the functions. Just ``Doc fixes'' is enough for the change log. When you make changes in many files that follow mechanically from one underlying change, it is enough to describe the underlying change. 
Here's an example of a change that affects all of the files in the repository: @example 2019-01-07 Paul Eggert <eggert@@cs.ucla.edu> Update copyright year to 2019 Run 'TZ=UTC0 admin/update-copyright $(git ls-files)'. @end example Test suite files are part of the software, so we recommend treating them as code for change-log purposes. There's no technical need to make change log entries for non-software files (manuals, help files, media files, etc.). This is because they are not susceptible to bugs that are hard to understand. To correct an error, you need not know the history of the erroneous passage; it is enough to compare what the file says with the actual facts. However, you should keep change logs for non-software files when the project gets copyright assignments from its contributors, so as to make the records of authorship more accurate. For that reason, we recommend keeping change logs for Texinfo sources of your project's manuals. @node Conditional Changes @subsection Conditional Changes @cindex conditional changes, and change logs @cindex change logs, conditional changes Source files can often contain code that is conditional on build-time or static conditions. For example, C programs can contain compile-time @code{#if} conditionals; programs implemented in interpreted languages can contain module imports or function definitions that are only performed for certain versions of the interpreter; and Automake @file{Makefile.am} files can contain variable definitions or target declarations that are only to be considered if a configure-time Automake conditional is true. Many changes are conditional as well: sometimes you add a new variable, or function, or even a new program or library, which is entirely dependent on a build-time condition. It is useful to indicate in the change log the conditions for which a change applies. Our convention for indicating conditional changes is to use @emph{square brackets around the name of the condition}.
Conditional changes can happen in numerous scenarios and with many variations, so here are some examples to help clarify. This first example describes changes in C, Perl, and Python files which are conditional but do not have an associated function or entity name: @example * xterm.c [SOLARIS2]: Include <string.h>. * FilePath.pm [$^O eq 'VMS']: Import the VMS::Feature module. * framework.py [sys.version_info < (2, 6)]: Make "with" statement available by importing it from __future__, to support also python 2.5. @end example Our other examples will for simplicity be limited to C, as the minor changes necessary to adapt them to other languages should be self-evident. Next, here is an entry describing a new definition which is entirely conditional: the C macro @code{FRAME_WINDOW_P} is defined (and used) only when the macro @code{HAVE_X_WINDOWS} is defined: @example * frame.h [HAVE_X_WINDOWS] (FRAME_WINDOW_P): Macro defined. @end example Next, an entry for a change within the function @code{init_display}, whose definition as a whole is unconditional, but the changes themselves are contained in a @samp{#ifdef HAVE_LIBNCURSES} conditional: @example * dispnew.c (init_display) [HAVE_LIBNCURSES]: If X, call tgetent. @end example Finally, here is an entry for a change that takes effect only when a certain macro is @emph{not} defined: @example * host.c (gethostname) [!HAVE_SOCKETS]: Replace with winsock version. @end example @node Indicating the Part Changed @subsection Indicating the Part Changed Indicate the part of a function which changed by using angle brackets enclosing an indication of what the changed part does. Here is an entry for a change in the part of the function @code{sh-while-getopts} that deals with @code{sh} commands: @example * progmodes/sh-script.el (sh-while-getopts) <sh>: Handle case that user-specified option string is empty. @end example @node Man Pages @section Man Pages @cindex man pages In the GNU project, man pages are secondary. 
It is not necessary or expected for every GNU program to have a man page, but some of them do. It's your choice whether to include a man page in your program. When you make this decision, consider that supporting a man page requires continual effort each time the program is changed. The time you spend on the man page is time taken away from more useful work. For a simple program which changes little, updating the man page may be a small job. Then there is little reason not to include a man page, if you have one. For a large program that changes a great deal, updating a man page may be a substantial burden. If a user offers to donate a man page, you may find this gift costly to accept. It may be better to refuse the man page unless the same person agrees to take full responsibility for maintaining it---so that you can wash your hands of it entirely. If this volunteer later ceases to do the job, then don't feel obliged to pick it up yourself; it may be better to withdraw the man page from the distribution until someone else agrees to update it. When a program changes only a little, you may feel that the discrepancies are small enough that the man page remains useful without updating. If so, put a prominent note near the beginning of the man page stating that you don't maintain it and that the Texinfo manual is more authoritative. The note should say how to access the Texinfo documentation. Be sure that man pages include a copyright statement and free license. The simple all-permissive license is appropriate for simple man pages (@pxref{License Notices for Other Files,,,maintain,Information for GNU Maintainers}). For long man pages, with enough explanation and documentation that they can be considered true manuals, use the GFDL (@pxref{License for Manuals}). Finally, the GNU help2man program (@uref{https://www.gnu.org/software/help2man/}) is one way to automate generation of a man page, in this case from @option{--help} output. This is sufficient in many cases. 
@node Reading other Manuals @section Reading other Manuals There may be non-free books or documentation files that describe the program you are documenting. It is ok to use these documents for reference, just as the author of a new algebra textbook can read other books on algebra. A large portion of any non-fiction book consists of facts, in this case facts about how a certain program works, and these facts are necessarily the same for everyone who writes about the subject. But be careful not to copy your outline structure, wording, tables or examples from preexisting non-free documentation. Copying from free documentation may be ok; please check with the FSF about the individual case. @node Managing Releases @chapter The Release Process @cindex releasing Making a release is more than just bundling up your source files in a tar file and putting it up for FTP@. You should set up your software so that it can be configured to run on a variety of systems. Your Makefile should conform to the GNU standards described below, and your directory layout should also conform to the standards discussed below. Doing so makes it easy to include your package into the larger framework of all GNU software. @menu * Configuration:: How configuration of GNU packages should work. * Makefile Conventions:: Makefile conventions. * Releases:: Making releases @end menu @node Configuration @section How Configuration Should Work @cindex program configuration @pindex configure Each GNU distribution should come with a shell script named @code{configure}. This script is given arguments which describe the kind of machine and system you want to compile the program for. The @code{configure} script must record the configuration options so that they affect compilation. The description here is the specification of the interface for the @code{configure} script in GNU packages. 
Many packages implement it using GNU Autoconf (@pxref{Top,, Introduction, autoconf, Autoconf}) and/or GNU Automake (@pxref{Top,, Introduction, automake, Automake}), but you do not have to use these tools. You can implement it any way you like; for instance, by making @code{configure} be a wrapper around a completely different configuration system. Another way for the @code{configure} script to operate is to make a link from a standard name such as @file{config.h} to the proper configuration file for the chosen system. If you use this technique, the distribution should @emph{not} contain a file named @file{config.h}. This is so that people won't be able to build the program without configuring it first. Another thing that @code{configure} can do is to edit the Makefile. If you do this, the distribution should @emph{not} contain a file named @file{Makefile}. Instead, it should include a file @file{Makefile.in} which contains the input used for editing. Once again, this is so that people won't be able to build the program without configuring it first. If @code{configure} does write the @file{Makefile}, then @file{Makefile} should have a target named @file{Makefile} which causes @code{configure} to be rerun, setting up the same configuration that was set up last time. The files that @code{configure} reads should be listed as dependencies of @file{Makefile}. All the files which are output from the @code{configure} script should have comments at the beginning stating that they were generated automatically using @code{configure}. This is so that users won't think of trying to edit them by hand. The @code{configure} script should write a file named @file{config.status} which describes which configuration options were specified when the program was last configured. This file should be a shell script which, if run, will recreate the same configuration. 
The @code{configure} script should accept an option of the form @samp{--srcdir=@var{dirname}} to specify the directory where sources are found (if it is not the current directory). This makes it possible to build the program in a separate directory, so that the actual source directory is not modified. If the user does not specify @samp{--srcdir}, then @code{configure} should check both @file{.} and @file{..} to see if it can find the sources. If it finds the sources in one of these places, it should use them from there. Otherwise, it should report that it cannot find the sources, and should exit with nonzero status. Usually the easy way to support @samp{--srcdir} is by editing a definition of @code{VPATH} into the Makefile. Some rules may need to refer explicitly to the specified source directory. To make this possible, @code{configure} can add to the Makefile a variable named @code{srcdir} whose value is precisely the specified directory. In addition, the @samp{configure} script should take options corresponding to most of the standard directory variables (@pxref{Directory Variables}). Here is the list: @example --prefix --exec-prefix --bindir --sbindir --libexecdir --sysconfdir --sharedstatedir --localstatedir --runstatedir --libdir --includedir --oldincludedir --datarootdir --datadir --infodir --localedir --mandir --docdir --htmldir --dvidir --pdfdir --psdir @end example The @code{configure} script should also take an argument which specifies the type of system to build the program for. This argument should look like this: @example @var{cpu}-@var{company}-@var{system} @end example For example, an Athlon-based GNU/Linux system might be @samp{i686-pc-linux-gnu}. The @code{configure} script needs to be able to decode all plausible alternatives for how to describe a machine. Thus, @samp{athlon-pc-gnu/linux} would be a valid alias. 
There is a shell script called @uref{https://git.savannah.gnu.org/cgit/config.git/plain/config.sub, @file{config.sub}} that you can use as a subroutine to validate system types and canonicalize aliases. The @code{configure} script should also take the option @option{--build=@var{buildtype}}, which should be equivalent to a plain @var{buildtype} argument. For example, @samp{configure --build=i686-pc-linux-gnu} is equivalent to @samp{configure i686-pc-linux-gnu}. When the build type is not specified by an option or argument, the @code{configure} script should normally guess it using the shell script @uref{https://git.savannah.gnu.org/cgit/config.git/plain/config.guess, @file{config.guess}}. @cindex optional features, configure-time Other options are permitted to specify in more detail the software or hardware present on the machine, to include or exclude optional parts of the package, or to adjust the name of some tools or arguments to them: @table @samp @item --enable-@var{feature}@r{[}=@var{parameter}@r{]} Configure the package to build and install an optional user-level facility called @var{feature}. This allows users to choose which optional features to include. Giving an optional @var{parameter} of @samp{no} should omit @var{feature}, if it is built by default. No @samp{--enable} option should @strong{ever} cause one feature to replace another. No @samp{--enable} option should ever substitute one useful behavior for another useful behavior. The only proper use for @samp{--enable} is for questions of whether to build part of the program or exclude it. @item --with-@var{package} @c @r{[}=@var{parameter}@r{]} The package @var{package} will be installed, so configure this package to work with @var{package}. @c Giving an optional @var{parameter} of @c @samp{no} should omit @var{package}, if it is used by default. Possible values of @var{package} include @samp{gnu-as} (or @samp{gas}), @samp{gnu-ld}, @samp{gnu-libc}, @samp{gdb}, @samp{x}, and @samp{x-toolkit}. 
Do not use a @samp{--with} option to specify the file name to use to find certain files. That is outside the scope of what @samp{--with} options are for. @item @var{variable}=@var{value} Set the value of the variable @var{variable} to @var{value}. This is used to override the default values of commands or arguments in the build process. For example, the user could issue @samp{configure CFLAGS=-g CXXFLAGS=-g} to build with debugging information and without the default optimization. Specifying variables as arguments to @code{configure}, like this: @example ./configure CC=gcc @end example is preferable to setting them in environment variables: @example CC=gcc ./configure @end example as it helps to recreate the same configuration later with @file{config.status}. However, both methods should be supported. @end table All @code{configure} scripts should accept all of the ``detail'' options and the variable settings, whether or not they make any difference to the particular package at hand. In particular, they should accept any option that starts with @samp{--with-} or @samp{--enable-}. This is so users will be able to configure an entire GNU source tree at once with a single set of options. You will note that the categories @samp{--with-} and @samp{--enable-} are narrow: they @strong{do not} provide a place for any sort of option you might think of. That is deliberate. We want to limit the possible configuration options in GNU software. We do not want GNU programs to have idiosyncratic configuration options. Packages that perform part of the compilation process may support cross-compilation. In such a case, the host and target machines for the program may be different. The @code{configure} script should normally treat the specified type of system as both the host and the target, thus producing a program which works for the same type of machine that it runs on. 
To compile a program to run on a host type that differs from the build type, use the configure option @option{--host=@var{hosttype}}, where @var{hosttype} uses the same syntax as @var{buildtype}. The host type normally defaults to the build type. To configure a cross-compiler, cross-assembler, or what have you, you should specify a target different from the host, using the configure option @samp{--target=@var{targettype}}. The syntax for @var{targettype} is the same as for the host type. So the command would look like this: @example ./configure --host=@var{hosttype} --target=@var{targettype} @end example The target type normally defaults to the host type. Programs for which cross-operation is not meaningful need not accept the @samp{--target} option, because configuring an entire operating system for cross-operation is not a meaningful operation. Some programs have ways of configuring themselves automatically. If your program is set up to do this, your @code{configure} script can simply ignore most of its arguments. @comment The makefile standards are in a separate file that is also @comment included by make.texinfo. Done by roland@gnu.ai.mit.edu on 1/6/93. @comment For this document, turn chapters into sections, etc. @lowersections @include make-stds.texi @raisesections @node Releases @section Making Releases @cindex packaging @cindex version numbers, for releases You should identify each release with a pair of version numbers, a major version and a minor. We have no objection to using more than two numbers, but it is very unlikely that you really need them. Package the distribution of @code{Foo version 69.96} up in a gzipped tar file with the name @file{foo-69.96.tar.gz}. It should unpack into a subdirectory named @file{foo-69.96}. Building and installing the program should never modify any of the files contained in the distribution. 
This means that all the files that form part of the program in any way must be classified into @dfn{source files} and @dfn{non-source files}. Source files are written by humans and never changed automatically; non-source files are produced from source files by programs under the control of the Makefile. @cindex @file{README} file The distribution should contain a file named @file{README} with a general overview of the package: @itemize @item the name of the package; @item the version number of the package, or a reference to where in the package the version can be found; @item a general description of what the package does; @item a reference to the file @file{INSTALL}, which should in turn contain an explanation of the installation procedure; @item a brief explanation of any unusual top-level directories or files, or other hints for readers to find their way around the source; @item a reference to the file which contains the copying conditions. The GNU GPL, if used, should be in a file called @file{COPYING}. If the GNU LGPL is used, it should be in a file called @file{COPYING.LESSER}. @end itemize Naturally, all the source files must be in the distribution. It is okay to include non-source files in the distribution along with the source files they are generated from, provided they are up-to-date with the source they are made from, and machine-independent, so that normal building of the distribution will never modify them. We commonly include non-source files produced by Autoconf, Automake, Bison, @code{flex}, @TeX{}, and @code{makeinfo}; this helps avoid unnecessary dependencies between our distributions, so that users can install whichever versions of whichever packages they like. Do not induce new dependencies on other software lightly. Non-source files that might actually be modified by building and installing the program should @strong{never} be included in the distribution. 
So if you do distribute non-source files, always make sure they are up to date when you make a new distribution. Make sure that all the files in the distribution are world-readable, and that directories are world-readable and world-searchable (octal mode 755). We used to recommend that all directories in the distribution also be world-writable (octal mode 777), because ancient versions of @code{tar} would otherwise not cope when extracting the archive as an unprivileged user. That can easily lead to security issues when creating the archive, however, so now we recommend against that. Don't include any symbolic links in the distribution itself. If the tar file contains symbolic links, then people cannot even unpack it on systems that don't support symbolic links. Also, don't use multiple names for one file in different directories, because certain file systems cannot handle this and that prevents unpacking the distribution. Try to make sure that all the file names will be unique on MS-DOS@. A name on MS-DOS consists of up to 8 characters, optionally followed by a period and up to three characters. MS-DOS will truncate extra characters both before and after the period. Thus, @file{foobarhacker.c} and @file{foobarhacker.o} are not ambiguous; they are truncated to @file{foobarha.c} and @file{foobarha.o}, which are distinct. @cindex @file{texinfo.tex}, in a distribution Include in your distribution a copy of the @file{texinfo.tex} you used to test print any @file{*.texinfo} or @file{*.texi} files. Likewise, if your program uses small GNU software packages like regex, getopt, obstack, or termcap, include them in the distribution file. Leaving them out would make the distribution file a little smaller at the expense of possible inconvenience to a user who doesn't know what other files to get. 
@node References @chapter References to Non-Free Software and Documentation @cindex references to non-free material A GNU program should not recommend, promote, or grant legitimacy to the use of any non-free program. Proprietary software is a social and ethical problem, and our aim is to put an end to that problem. We can't stop some people from writing proprietary programs, or stop other people from using them, but we can and should refuse to advertise them to new potential customers, or to give the public the impression that their existence is legitimate. The GNU definition of free software is found on the GNU web site at @url{https://www.gnu.org/@/philosophy/@/free-sw.html}, and the definition of free documentation is found at @url{https://www.gnu.org/@/philosophy/@/free-doc.html}. The terms ``free'' and ``non-free'', used in this document, refer to those definitions. A list of important licenses and whether they qualify as free is in @url{https://www.gnu.org/@/licenses/@/license-list.html}. If it is not clear whether a license qualifies as free, please ask the GNU Project by writing to @email{licensing@@gnu.org}. We will answer, and if the license is an important one, we will add it to the list. When a non-free program or system is well known, you can mention it in passing---that is harmless, since users who might want to use it probably already know about it. For instance, it is fine to explain how to build your package on top of some widely used non-free operating system, or how to use it together with some widely used non-free program, after first explaining how to use it on the GNU system. However, you should give only the necessary information to help those who already use the non-free program to use your program with it---don't give, or refer to, any further information about the proprietary program, and don't imply that the proprietary program enhances your program, or that its existence is in any way a good thing. 
The goal should be that people already using the proprietary program will get the advice they need about how to use your free program with it, while people who don't already use the proprietary program will not see anything likely to lead them to take an interest in it. You shouldn't recommend any non-free add-ons for the non-free program, but it is ok to mention free add-ons that help it to work with your program, and how to install the free add-ons even if that requires running some non-free program. If a non-free program or system is obscure in your program's domain, your program should not mention or support it at all, since doing so would tend to popularize the non-free program more than it popularizes your program. (You cannot hope to find many additional users for your program among the users of Foobar, if the existence of Foobar is not generally known among people who might want to use your program.) Sometimes a program is free software in itself but depends on a non-free platform in order to run. For instance, it used to be the case that many Java programs depended on some non-free Java libraries. (See @uref{https://www.gnu.org/philosophy/java-trap.html}.) To recommend or promote such a program is to promote the other programs it needs; therefore, judge mentions of the former as if they were mentions of the latter. For this reason, we were careful about listing Java programs in the Free Software Directory: we wanted to avoid promoting the non-free Java libraries. Java no longer has this problem, but the general principle will remain the same: don't recommend, promote or legitimize programs that depend on non-free software to run. Some free programs strongly encourage the use of non-free software. A typical example is @command{mplayer}. It is free software in itself, and the free code can handle some kinds of files. 
However, @command{mplayer} recommends use of non-free codecs for other kinds of files, and users that install @command{mplayer} are very likely to install those codecs along with it. To recommend @command{mplayer} is, in effect, to promote use of the non-free codecs. Thus, you should not recommend programs that strongly encourage the use of non-free software. This is why we do not list @command{mplayer} in the Free Software Directory. A GNU package should not refer the user to any non-free documentation for free software. Free documentation that can be included in free operating systems is essential for completing the GNU system, or any free operating system, so encouraging it is a priority; to recommend use of documentation that we are not allowed to include undermines the impetus for the community to produce documentation that we can include. So GNU packages should never recommend non-free documentation. By contrast, it is ok to refer to journal articles and textbooks in the comments of a program for explanation of how it functions, even though they are non-free. This is because we don't include such things in the GNU system even if they are free---they are outside the scope of what a software distribution needs to include. Referring to a web site that describes or recommends a non-free program is promoting that program, so please do not make links to (or mention by name) web sites that contain such material. This policy is relevant particularly for the web pages for a GNU package. What about chains of links? Following links from nearly any web site can lead eventually to promotion of non-free software; this is inherent in the nature of the web. Here's how we treat that. 
You should not refer to AT&T's web site if that recommends AT&T's non-free software packages; you should not refer to a page @var{p} that links to AT&T's site presenting it as a place to get some non-free program, because that part of the page @var{p} itself recommends and legitimizes the non-free program. However, if @var{p} contains a link to AT&T's web site for some other purpose (such as long-distance telephone service), that is no reason you should not link to @var{p}. A web page recommends a program in an implicit but particularly strong way if it requires users to run that program in order to use the page. Many pages contain Javascript code which they recommend in this way. This Javascript code may be free or nonfree, but nonfree is the usual case. If the purpose for which you would refer to the page cannot be carried out without running nonfree Javascript code, then you should not refer to it. Thus, if the purpose of referring to the page is for people to view a video or to subscribe to a mailing list, and the viewing or subscribing fails to work if the user's browser blocks the nonfree Javascript code, then don't refer to that page. The extreme case is that of web sites which depend on nonfree Javascript code even to @emph{see} the contents of the pages. Any site hosted on @indicateurl{wix.com} has this problem, and so do some other sites. Referring people to such pages to read their contents is, in effect, urging them to run those nonfree programs---so please don't refer to those pages. (Such pages also break the Web, so they deserve condemnation for two reasons.) Instead, please quote excerpts from the page to make your point, or find another place to refer to that information. 
@node GNU Free Documentation License @appendix GNU Free Documentation License @cindex FDL, GNU Free Documentation License @include fdl.texi @node Index @unnumbered Index @printindex cp @bye Local variables: eval: (add-hook 'before-save-hook 'time-stamp) time-stamp-start: "@set lastupdate " time-stamp-end: "$" time-stamp-format: "%:b %:d, %:y" compile-command: "cd work.s && make" End: ���������������������������������������������������������������autoconf-2.71/doc/gendocs_template������������������������������������������������������������������0000644�0000000�0000000�00000007346�14004621310�014156� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<!--#include virtual="/server/header.html" --> <!-- Parent-Version: 1.78 --> <!-- Copyright (C) 2006-2021 Free Software Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without any warranty. --> <title>%%TITLE%% - GNU Project - Free Software Foundation

%%TITLE%%

Free Software Foundation
last updated %%DATE%%

This manual (%%PACKAGE%%) is available in the following formats:

You can buy printed copies of some manuals (among other items) from the Free Software Foundation; this helps support FSF activities.

(This page generated by the %%SCRIPTNAME%% script.)

autoconf-2.71/doc/autoconf.info0000644000000000000000000450600414004623451013420 00000000000000This is autoconf.info, produced by makeinfo version 6.7 from autoconf.texi. This manual (28 January 2021) is for GNU Autoconf (version 2.71), a package for creating scripts to configure source code packages using templates and an M4 macro package. Copyright © 1992–1996, 1998–2017, 2020–2021 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover texts, and no Back-Cover Texts. A copy of the license is included in the section entitled “GNU Free Documentation License.” INFO-DIR-SECTION Software development START-INFO-DIR-ENTRY * Autoconf: (autoconf). Create source code configuration scripts. END-INFO-DIR-ENTRY INFO-DIR-SECTION Individual utilities START-INFO-DIR-ENTRY * autoscan: (autoconf)autoscan Invocation. Semi-automatic ‘configure.ac’ writing * ifnames: (autoconf)ifnames Invocation. Listing conditionals in source. * autoconf-invocation: (autoconf)autoconf Invocation. How to create configuration scripts * autoreconf: (autoconf)autoreconf Invocation. Remaking multiple ‘configure’ scripts * autoheader: (autoconf)autoheader Invocation. How to create configuration templates * autom4te: (autoconf)autom4te Invocation. The Autoconf executables backbone * configure: (autoconf)configure Invocation. Configuring a package. * autoupdate: (autoconf)autoupdate Invocation. Automatic update of ‘configure.ac’ * config.status: (autoconf)config.status Invocation. Recreating configurations. * testsuite: (autoconf)testsuite Invocation. Running an Autotest test suite. 
END-INFO-DIR-ENTRY  File: autoconf.info, Node: Top, Next: Introduction, Up: (dir) Autoconf ******** This manual (28 January 2021) is for GNU Autoconf (version 2.71), a package for creating scripts to configure source code packages using templates and an M4 macro package. Copyright © 1992–1996, 1998–2017, 2020–2021 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover texts, and no Back-Cover Texts. A copy of the license is included in the section entitled “GNU Free Documentation License.” * Menu: * Introduction:: Autoconf’s purpose, strengths, and weaknesses * The GNU Build System:: A set of tools for portable software packages * Making configure Scripts:: How to organize and produce Autoconf scripts * Setup:: Initialization and output * Existing Tests:: Macros that check for particular features * Writing Tests:: How to write new feature checks * Results:: What to do with results from feature checks * Programming in M4:: Layers on top of which Autoconf is written * Programming in M4sh:: Shell portability layer * Writing Autoconf Macros:: Adding new macros to Autoconf * Portable Shell:: Shell script portability pitfalls * Portable Make:: Makefile portability pitfalls * Portable C and C++:: C and C++ portability pitfalls * Manual Configuration:: Selecting features that can’t be guessed * Site Configuration:: Local defaults for ‘configure’ * Running configure Scripts:: How to use the Autoconf output * config.status Invocation:: Recreating a configuration * Obsolete Constructs:: Kept for backward compatibility * Using Autotest:: Creating portable test suites * FAQ:: Frequent Autoconf Questions, with answers * History:: History of Autoconf * GNU Free Documentation License:: License for copying this manual * Indices:: Indices of symbols, concepts, etc. 
— The Detailed Node Listing — The GNU Build System * Automake:: Escaping makefile hell * Gnulib:: The GNU portability library * Libtool:: Building libraries portably * Pointers:: More info on the GNU build system Making ‘configure’ Scripts * Writing Autoconf Input:: What to put in an Autoconf input file * autoscan Invocation:: Semi-automatic ‘configure.ac’ writing * ifnames Invocation:: Listing the conditionals in source code * autoconf Invocation:: How to create configuration scripts * autoreconf Invocation:: Remaking multiple ‘configure’ scripts Writing ‘configure.ac’ * Shell Script Compiler:: Autoconf as solution of a problem * Autoconf Language:: Programming in Autoconf * Autoconf Input Layout:: Standard organization of ‘configure.ac’ Initialization and Output Files * Initializing configure:: Option processing etc. * Versioning:: Dealing with Autoconf versions * Notices:: Copyright, version numbers in ‘configure’ * Input:: Where Autoconf should find files * Output:: Outputting results from the configuration * Configuration Actions:: Preparing the output based on results * Configuration Files:: Creating output files * Makefile Substitutions:: Using output variables in makefiles * Configuration Headers:: Creating a configuration header file * Configuration Commands:: Running arbitrary instantiation commands * Configuration Links:: Links depending on the configuration * Subdirectories:: Configuring independent packages together * Default Prefix:: Changing the default installation prefix Substitutions in Makefiles * Preset Output Variables:: Output variables that are always set * Installation Directory Variables:: Other preset output variables * Changed Directory Variables:: Warnings about ‘datarootdir’ * Build Directories:: Supporting multiple concurrent compiles * Automatic Remaking:: Makefile rules for configuring Configuration Header Files * Header Templates:: Input for the configuration headers * autoheader Invocation:: How to create configuration templates * 
Autoheader Macros:: How to specify CPP templates Existing Tests * Common Behavior:: Macros’ standard schemes * Alternative Programs:: Selecting between alternative programs * Files:: Checking for the existence of files * Libraries:: Library archives that might be missing * Library Functions:: C library functions that might be missing * Header Files:: Header files that might be missing * Declarations:: Declarations that may be missing * Structures:: Structures or members that might be missing * Types:: Types that might be missing * Compilers and Preprocessors:: Checking for compiling programs * System Services:: Operating system services * C and Posix Variants:: Kludges for C and Posix variants * Erlang Libraries:: Checking for the existence of Erlang libraries Common Behavior * Standard Symbols:: Symbols defined by the macros * Default Includes:: Includes used by the generic macros Alternative Programs * Particular Programs:: Special handling to find certain programs * Generic Programs:: How to find other programs Library Functions * Function Portability:: Pitfalls with usual functions * Particular Functions:: Special handling to find certain functions * Generic Functions:: How to find other functions Header Files * Header Portability:: Collected knowledge on common headers * Particular Headers:: Special handling to find certain headers * Generic Headers:: How to find other headers Declarations * Particular Declarations:: Macros to check for certain declarations * Generic Declarations:: How to find other declarations Structures * Particular Structures:: Macros to check for certain structure members * Generic Structures:: How to find other structure members Types * Particular Types:: Special handling to find certain types * Generic Types:: How to find other types Compilers and Preprocessors * Specific Compiler Characteristics:: Some portability issues * Generic Compiler Characteristics:: Language independent tests and features * C Compiler:: Checking its 
characteristics * C++ Compiler:: Likewise * Objective C Compiler:: Likewise * Objective C++ Compiler:: Likewise * Erlang Compiler and Interpreter:: Likewise * Fortran Compiler:: Likewise * Go Compiler:: Likewise Writing Tests * Language Choice:: Selecting which language to use for testing * Writing Test Programs:: Forging source files for compilers * Running the Preprocessor:: Detecting preprocessor symbols * Running the Compiler:: Detecting language or header features * Running the Linker:: Detecting library features * Runtime:: Testing for runtime features * Systemology:: A zoology of operating systems * Multiple Cases:: Tests for several possible values Writing Test Programs * Guidelines:: General rules for writing test programs * Test Functions:: Avoiding pitfalls in test programs * Generating Sources:: Source program boilerplate Results of Tests * Defining Symbols:: Defining C preprocessor symbols * Setting Output Variables:: Replacing variables in output files * Special Chars in Variables:: Characters to beware of in variables * Caching Results:: Speeding up subsequent ‘configure’ runs * Printing Messages:: Notifying ‘configure’ users Caching Results * Cache Variable Names:: Shell variables used in caches * Cache Files:: Files ‘configure’ uses for caching * Cache Checkpointing:: Loading and saving the cache file Programming in M4 * M4 Quotation:: Protecting macros from unwanted expansion * Using autom4te:: The Autoconf executables backbone * Programming in M4sugar:: Convenient pure M4 macros * Debugging via autom4te:: Figuring out what M4 was doing M4 Quotation * Active Characters:: Characters that change the behavior of M4 * One Macro Call:: Quotation and one macro call * Quoting and Parameters:: M4 vs. 
shell parameters * Quotation and Nested Macros:: Macros calling macros * Changequote is Evil:: Worse than INTERCAL: M4 + changequote * Quadrigraphs:: Another way to escape special characters * Balancing Parentheses:: Dealing with unbalanced parentheses * Quotation Rule Of Thumb:: One parenthesis, one quote Using ‘autom4te’ * autom4te Invocation:: A GNU M4 wrapper * Customizing autom4te:: Customizing the Autoconf package Programming in M4sugar * Redefined M4 Macros:: M4 builtins changed in M4sugar * Diagnostic Macros:: Diagnostic messages from M4sugar * Diversion support:: Diversions in M4sugar * Conditional constructs:: Conditions in M4 * Looping constructs:: Iteration in M4 * Evaluation Macros:: More quotation and evaluation control * Text processing Macros:: String manipulation in M4 * Number processing Macros:: Arithmetic computation in M4 * Set manipulation Macros:: Set manipulation in M4 * Forbidden Patterns:: Catching unexpanded macros Programming in M4sh * Common Shell Constructs:: Portability layer for common shell constructs * Polymorphic Variables:: Support for indirect variable names * Initialization Macros:: Macros to establish a sane shell environment * File Descriptor Macros:: File descriptor macros for input and output Writing Autoconf Macros * Macro Definitions:: Basic format of an Autoconf macro * Macro Names:: What to call your new macros * Dependencies Between Macros:: What to do when macros depend on other macros * Obsoleting Macros:: Warning about old ways of doing things * Coding Style:: Writing Autoconf macros à la Autoconf Dependencies Between Macros * Prerequisite Macros:: Ensuring required information * Suggested Ordering:: Warning about possible ordering problems * One-Shot Macros:: Ensuring a macro is called only once Portable Shell Programming * Shellology:: A zoology of shells * Invoking the Shell:: Invoking the shell as a command * Here-Documents:: Quirks and tricks * File Descriptors:: FDs and redirections * Signal Handling:: Shells, 
signals, and headaches * File System Conventions:: File names * Shell Pattern Matching:: Pattern matching * Shell Substitutions:: Variable and command expansions * Assignments:: Varying side effects of assignments * Parentheses:: Parentheses in shell scripts * Slashes:: Slashes in shell scripts * Special Shell Variables:: Variables you should not change * Shell Functions:: What to look out for if you use them * Limitations of Builtins:: Portable use of not so portable /bin/sh * Limitations of Usual Tools:: Portable use of portable tools Portable Make Programming * $< in Ordinary Make Rules:: $< in ordinary rules * Failure in Make Rules:: Failing portably in rules * Special Chars in Names:: Special Characters in Macro Names * Backslash-Newline-Empty:: Empty lines after backslash-newline * Backslash-Newline Comments:: Spanning comments across line boundaries * Long Lines in Makefiles:: Line length limitations * Macros and Submakes:: ‘make macro=value’ and submakes * The Make Macro MAKEFLAGS:: ‘$(MAKEFLAGS)’ portability issues * The Make Macro SHELL:: ‘$(SHELL)’ portability issues * Parallel Make:: Parallel ‘make’ quirks * Comments in Make Rules:: Other problems with Make comments * Newlines in Make Rules:: Using literal newlines in rules * Comments in Make Macros:: Other problems with Make comments in macros * Trailing whitespace in Make Macros:: Macro substitution problems * Command-line Macros and whitespace:: Whitespace trimming of values * obj/ and Make:: Don’t name a subdirectory ‘obj’ * make -k Status:: Exit status of ‘make -k’ * VPATH and Make:: ‘VPATH’ woes * Single Suffix Rules:: Single suffix rules and separated dependencies * Timestamps and Make:: Sub-second timestamp resolution ‘VPATH’ and Make * Variables listed in VPATH:: ‘VPATH’ must be literal on ancient hosts * VPATH and Double-colon:: Problems with ‘::’ on ancient hosts * $< in Explicit Rules:: ‘$<’ does not work in ordinary rules * Automatic Rule Rewriting:: ‘VPATH’ goes wild on Solaris * Tru64 
Directory Magic:: ‘mkdir’ goes wild on Tru64 * Make Target Lookup:: More details about ‘VPATH’ lookup Portable C and C++ Programming * Varieties of Unportability:: How to make your programs unportable * Integer Overflow:: When integers get too large * Preprocessor Arithmetic:: ‘#if’ expression problems * Null Pointers:: Properties of null pointers * Buffer Overruns:: Subscript errors and the like * Volatile Objects:: ‘volatile’ and signals * Floating Point Portability:: Portable floating-point arithmetic * Exiting Portably:: Exiting and the exit status Integer Overflow * Integer Overflow Basics:: Why integer overflow is a problem * Signed Overflow Examples:: Examples of code assuming wraparound * Optimization and Wraparound:: Optimizations that break uses of wraparound * Signed Overflow Advice:: Practical advice for signed overflow issues * Signed Integer Division:: ‘INT_MIN / -1’ and ‘INT_MIN % -1’ Manual Configuration * Specifying Target Triplets:: Specifying target triplets * Canonicalizing:: Getting the canonical system type * Using System Type:: What to do with the system type Site Configuration * Help Formatting:: Customizing ‘configure --help’ * External Software:: Working with other optional software * Package Options:: Selecting optional features * Pretty Help Strings:: Formatting help string * Option Checking:: Controlling checking of ‘configure’ options * Site Details:: Configuring site details * Transforming Names:: Changing program names when installing * Site Defaults:: Giving ‘configure’ local defaults Transforming Program Names When Installing * Transformation Options:: ‘configure’ options to transform names * Transformation Examples:: Sample uses of transforming names * Transformation Rules:: Makefile uses of transforming names Running ‘configure’ Scripts * Basic Installation:: Instructions for typical cases * Compilers and Options:: Selecting compilers and optimization * Multiple Architectures:: Compiling for multiple architectures at once * 
Installation Names:: Installing in different directories * Optional Features:: Selecting optional features * Particular Systems:: Particular systems * System Type:: Specifying the system type * Sharing Defaults:: Setting site-wide defaults for ‘configure’ * Defining Variables:: Specifying the compiler etc. * configure Invocation:: Changing how ‘configure’ runs Obsolete Constructs * Obsolete config.status Use:: Obsolete convention for ‘config.status’ * acconfig Header:: Additional entries in ‘config.h.in’ * autoupdate Invocation:: Automatic update of ‘configure.ac’ * Obsolete Macros:: Backward compatibility macros * Autoconf 1:: Tips for upgrading your files * Autoconf 2.13:: Some fresher tips Upgrading From Version 1 * Changed File Names:: Files you might rename * Changed Makefiles:: New things to put in ‘Makefile.in’ * Changed Macros:: Macro calls you might replace * Changed Results:: Changes in how to check test results * Changed Macro Writing:: Better ways to write your own macros Upgrading From Version 2.13 * Changed Quotation:: Broken code which used to work * New Macros:: Interaction with foreign macros * Hosts and Cross-Compilation:: Bugward compatibility kludges * AC_LIBOBJ vs LIBOBJS:: LIBOBJS is a forbidden token * AC_ACT_IFELSE vs AC_TRY_ACT:: A more generic scheme for testing sources Generating Test Suites with Autotest * Using an Autotest Test Suite:: Autotest and the user * Writing Testsuites:: Autotest macros * testsuite Invocation:: Running ‘testsuite’ scripts * Making testsuite Scripts:: Using autom4te to create ‘testsuite’ Using an Autotest Test Suite * testsuite Scripts:: The concepts of Autotest * Autotest Logs:: Their contents Frequent Autoconf Questions, with answers * Distributing:: Distributing ‘configure’ scripts * Why GNU M4:: Why not use the standard M4? * Bootstrapping:: Autoconf and GNU M4 require each other? 
* Why Not Imake:: Why GNU uses ‘configure’ instead of Imake * Defining Directories:: Passing ‘datadir’ to program * Autom4te Cache:: What is it? Can I remove it? * Present But Cannot Be Compiled:: Compiler and Preprocessor Disagree * Expanded Before Required:: Expanded Before Required * Debugging:: Debugging ‘configure’ scripts History of Autoconf * Genesis:: Prehistory and naming of ‘configure’ * Exodus:: The plagues of M4 and Perl * Leviticus:: The priestly code of portability arrives * Numbers:: Growth and contributors * Deuteronomy:: Approaching the promises of easy configuration Indices * Environment Variable Index:: Index of environment variables used * Output Variable Index:: Index of variables set in output files * Preprocessor Symbol Index:: Index of C preprocessor symbols defined * Cache Variable Index:: Index of documented cache variables * Autoconf Macro Index:: Index of Autoconf macros * M4 Macro Index:: Index of M4, M4sugar, and M4sh macros * Autotest Macro Index:: Index of Autotest macros * Program & Function Index:: Index of those with portability problems * Concept Index:: General index  File: autoconf.info, Node: Introduction, Next: The GNU Build System, Prev: Top, Up: Top 1 Introduction ************** A physicist, an engineer, and a computer scientist were discussing the nature of God. 
“Surely a Physicist,” said the physicist, “because early in the Creation, God made Light; and you know, Maxwell’s equations, the dual nature of electromagnetic waves, the relativistic consequences...” “An Engineer!,” said the engineer, “because before making Light, God split the Chaos into Land and Water; it takes a hell of an engineer to handle that big amount of mud, and orderly separation of solids from liquids...” The computer scientist shouted: “And the Chaos, where do you think it was coming from, hmm?” —Anonymous Autoconf is a tool for producing shell scripts that automatically configure software source code packages to adapt to many kinds of Posix-like systems. The configuration scripts produced by Autoconf are independent of Autoconf when they are run, so their users do not need to have Autoconf. The configuration scripts produced by Autoconf require no manual user intervention when run; they do not normally even need an argument specifying the system type. Instead, they individually test for the presence of each feature that the software package they are for might need. (Before each check, they print a one-line message stating what they are checking for, so the user doesn’t get too bored while waiting for the script to finish.) As a result, they deal well with systems that are hybrids or customized from the more common Posix variants. There is no need to maintain files that list the features supported by each release of each variant of Posix. For each software package that Autoconf is used with, it creates a configuration script from a template file that lists the system features that the package needs or can use. After the shell code to recognize and respond to a system feature has been written, Autoconf allows it to be shared by many software packages that can use (or need) that feature. 
If it later turns out that the shell code needs adjustment for some reason, it needs to be changed in only one place; all of the configuration scripts can be regenerated automatically to take advantage of the updated code. Those who do not understand Autoconf are condemned to reinvent it, poorly. The primary goal of Autoconf is making the _user’s_ life easier; making the _maintainer’s_ life easier is only a secondary goal. Put another way, the primary goal is not to make the generation of ‘configure’ automatic for package maintainers (although patches along that front are welcome, since package maintainers form the user base of Autoconf); rather, the goal is to make ‘configure’ painless, portable, and predictable for the end user of each “autoconfiscated” package. And to this degree, Autoconf is highly successful at its goal—most complaints to the Autoconf list are about difficulties in writing Autoconf input, and not in the behavior of the resulting ‘configure’. Even packages that don’t use Autoconf will generally provide a ‘configure’ script, and the most common complaint about these alternative home-grown scripts is that they fail to meet one or more of the GNU Coding Standards (*note (standards)Configuration::) that users have come to expect from Autoconf-generated ‘configure’ scripts. The Metaconfig package is similar in purpose to Autoconf, but the scripts it produces require manual user intervention, which is quite inconvenient when configuring large source trees. Unlike Metaconfig scripts, Autoconf scripts can support cross-compiling, if some care is taken in writing them. Autoconf does not solve all problems related to making portable software packages—for a more complete solution, it should be used in concert with other GNU build tools like Automake and Libtool. These other tools take on jobs like the creation of a portable, recursive makefile with all of the standard targets, linking of shared libraries, and so on. 
*Note The GNU Build System::, for more information. Autoconf imposes some restrictions on the names of macros used with ‘#if’ in C programs (*note Preprocessor Symbol Index::). Autoconf requires GNU M4 version 1.4.6 or later in order to generate the scripts. It uses features that some versions of M4, including GNU M4 1.3, do not have. Autoconf works better with GNU M4 version 1.4.14 or later, though this is not required. *Note Autoconf 1::, for information about upgrading from version 1. *Note History::, for the story of Autoconf’s development. *Note FAQ::, for answers to some common questions about Autoconf. See the Autoconf web page (https://www.gnu.org/software/autoconf/) for up-to-date information, details on the mailing lists, pointers to a list of known bugs, etc. Mail suggestions to the Autoconf mailing list . Past suggestions are archived (https://lists.gnu.org/archive/html/autoconf/). Mail bug reports to the Autoconf Bugs mailing list . Past bug reports are archived (https://lists.gnu.org/archive/html/bug-autoconf/). If possible, first check that your bug is not already solved in current development versions, and that it has not been reported yet. Be sure to include all the needed information and a short ‘configure.ac’ that demonstrates the problem. Autoconf’s development tree is accessible via ‘git’; see the Autoconf Summary (https://savannah.gnu.org/projects/autoconf/) for details, or view the actual repository (https://git.savannah.gnu.org/cgit/autoconf.git). Patches relative to the current ‘git’ version can be sent for review to the Autoconf Patches mailing list , with discussion on prior patches archived (https://lists.gnu.org/archive/html/autoconf-patches/); and all commits are posted in the read-only Autoconf Commit mailing list , which is also archived (https://lists.gnu.org/archive/html/autoconf-commit/). Because of its mission, the Autoconf package itself includes only a set of often-used macros that have already demonstrated their usefulness. 
Nevertheless, if you wish to share your macros, or find existing ones, see the Autoconf Macro Archive (https://www.gnu.org/software/autoconf-archive/), which is kindly run by Peter Simons .  File: autoconf.info, Node: The GNU Build System, Next: Making configure Scripts, Prev: Introduction, Up: Top 2 The GNU Build System ********************** Autoconf solves an important problem—reliable discovery of system-specific build and runtime information—but this is only one piece of the puzzle for the development of portable software. To this end, the GNU project has developed a suite of integrated utilities to finish the job Autoconf started: the GNU build system, whose most important components are Autoconf, Automake, and Libtool. In this chapter, we introduce you to those tools, point you to sources of more information, and try to convince you to use the entire GNU build system for your software. * Menu: * Automake:: Escaping makefile hell * Gnulib:: The GNU portability library * Libtool:: Building libraries portably * Pointers:: More info on the GNU build system  File: autoconf.info, Node: Automake, Next: Gnulib, Up: The GNU Build System 2.1 Automake ============ The ubiquity of ‘make’ means that a makefile is almost the only viable way to distribute automatic build rules for software, but one quickly runs into its numerous limitations. Its lack of support for automatic dependency tracking, recursive builds in subdirectories, reliable timestamps (e.g., for network file systems), and so on, mean that developers must painfully (and often incorrectly) reinvent the wheel for each project. Portability is non-trivial, thanks to the quirks of ‘make’ on many systems. On top of all this is the manual labor required to implement the many standard targets that users have come to expect (‘make install’, ‘make distclean’, ‘make uninstall’, etc.). 
Since you are, of course, using Autoconf, you also have to insert repetitive code in your ‘Makefile.in’ to recognize ‘@CC@’, ‘@CFLAGS@’, and other substitutions provided by ‘configure’. Into this mess steps “Automake”. Automake allows you to specify your build needs in a ‘Makefile.am’ file with a vastly simpler and more powerful syntax than that of a plain makefile, and then generates a portable ‘Makefile.in’ for use with Autoconf. For example, the ‘Makefile.am’ to build and install a simple “Hello world” program might look like: bin_PROGRAMS = hello hello_SOURCES = hello.c The resulting ‘Makefile.in’ (~400 lines) automatically supports all the standard targets, the substitutions provided by Autoconf, automatic dependency tracking, ‘VPATH’ building, and so on. ‘make’ builds the ‘hello’ program, and ‘make install’ installs it in ‘/usr/local/bin’ (or whatever prefix was given to ‘configure’, if not ‘/usr/local’). The benefits of Automake increase for larger packages (especially ones with subdirectories), but even for small programs the added convenience and portability can be substantial. And that’s not all...  File: autoconf.info, Node: Gnulib, Next: Libtool, Prev: Automake, Up: The GNU Build System 2.2 Gnulib ========== GNU software has a well-deserved reputation for running on many different types of systems. While our primary goal is to write software for the GNU system, many users and developers have been introduced to us through the systems that they were already using. Gnulib is a central location for common GNU code, intended to be shared among free software packages. Its components are typically shared at the source level, rather than being a library that gets built, installed, and linked against. The idea is to copy files from Gnulib into your own source tree. There is no distribution tarball; developers should just grab source modules from the repository. The source files are available online, under various licenses, mostly GNU GPL or GNU LGPL. 
Gnulib modules typically contain C source code along with Autoconf macros used to configure the source code. For example, the Gnulib ‘stdalign’ module implements a ‘stdalign.h’ header that nearly conforms to C11, even on old-fashioned hosts that lack ‘stdalign.h’. This module contains a source file for the replacement header, along with an Autoconf macro that arranges to use the replacement header on old-fashioned systems. For more information, consult the Gnulib website, .  File: autoconf.info, Node: Libtool, Next: Pointers, Prev: Gnulib, Up: The GNU Build System 2.3 Libtool =========== Often, one wants to build not only programs, but libraries, so that other programs can benefit from the fruits of your labor. Ideally, one would like to produce _shared_ (dynamically linked) libraries, which can be used by multiple programs without duplication on disk or in memory and can be updated independently of the linked programs. Producing shared libraries portably, however, is the stuff of nightmares—each system has its own incompatible tools, compiler flags, and magic incantations. Fortunately, GNU provides a solution: “Libtool”. Libtool handles all the requirements of building shared libraries for you, and at this time seems to be the _only_ way to do so with any portability. It also handles many other headaches, such as: the interaction of Make rules with the variable suffixes of shared libraries, linking reliably with shared libraries before they are installed by the superuser, and supplying a consistent versioning system (so that different versions of a library can be installed or upgraded without breaking binary compatibility). Although Libtool, like Autoconf, can be used without Automake, it is most simply utilized in conjunction with Automake—there, Libtool is used automatically whenever shared libraries are needed, and you need not know its syntax.  
File: autoconf.info, Node: Pointers, Prev: Libtool, Up: The GNU Build System 2.4 Pointers ============ Developers who are used to the simplicity of ‘make’ for small projects on a single system might be daunted at the prospect of learning to use Automake and Autoconf. As your software is distributed to more and more users, however, you otherwise quickly find yourself putting lots of effort into reinventing the services that the GNU build tools provide, and making the same mistakes that they once made and overcame. (Besides, since you’re already learning Autoconf, Automake is a piece of cake.) There are a number of places that you can go to for more information on the GNU build tools. − Web The project home pages for Autoconf (https://www.gnu.org/software/autoconf/), Automake (https://www.gnu.org/software/automake/), Gnulib (https://www.gnu.org/software/gnulib/), and Libtool (https://www.gnu.org/software/libtool/). − Automake Manual *Note Automake: (automake)Top, for more information on Automake. − Books The book ‘GNU Autoconf, Automake and Libtool’(1) describes the complete GNU build environment. You can also find the entire book on-line (https://www.sourceware.org/autobook/). ---------- Footnotes ---------- (1) ‘GNU Autoconf, Automake and Libtool’, by G. V. Vaughan, B. Elliston, T. Tromey, and I. L. Taylor. SAMS (originally New Riders), 2000, ISBN 1578701902.  File: autoconf.info, Node: Making configure Scripts, Next: Setup, Prev: The GNU Build System, Up: Top 3 Making ‘configure’ Scripts **************************** The configuration scripts that Autoconf produces are by convention called ‘configure’. When run, ‘configure’ creates several files, replacing configuration parameters in them with appropriate values. 
The files that ‘configure’ creates are: − one or more ‘Makefile’ files, usually one in each subdirectory of the package (*note Makefile Substitutions::); − optionally, a C header file, the name of which is configurable, containing ‘#define’ directives (*note Configuration Headers::); − a shell script called ‘config.status’ that, when run, recreates the files listed above (*note config.status Invocation::); − an optional shell script normally called ‘config.cache’ (created when using ‘configure --config-cache’) that saves the results of running many of the tests (*note Cache Files::); − a file called ‘config.log’ containing any messages produced by compilers, to help debugging if ‘configure’ makes a mistake. To create a ‘configure’ script with Autoconf, you need to write an Autoconf input file ‘configure.ac’ and run ‘autoconf’ on it. If you write your own feature tests to supplement those that come with Autoconf, you might also write files called ‘aclocal.m4’ and ‘acsite.m4’. If you use a C header file to contain ‘#define’ directives, you might also run ‘autoheader’, and you can distribute the generated file ‘config.h.in’ with the package. Here is a diagram showing how the files that can be used in configuration are produced. Programs that are executed are suffixed by ‘*’. Optional files are enclosed in square brackets (‘[]’). ‘autoconf’ and ‘autoheader’ also read the installed Autoconf macro files (by reading ‘autoconf.m4’). Files used in preparing a software package for distribution, when using just Autoconf: your source files --> [autoscan*] --> [configure.scan] --> configure.ac configure.ac --. | .------> autoconf* -----> configure [aclocal.m4] --+---+ | `-----> [autoheader*] --> [config.h.in] [acsite.m4] ---' Makefile.in Additionally, if you use Automake, the following additional productions come into play: [acinclude.m4] --. | [local macros] --+--> aclocal* --> aclocal.m4 | configure.ac ----' configure.ac --. 
+--> automake* --> Makefile.in Makefile.am ---' Files used in configuring a software package: .-------------> [config.cache] configure* ------------+-------------> config.log | [config.h.in] -. v .-> [config.h] -. +--> config.status* -+ +--> make* Makefile.in ---' `-> Makefile ---' * Menu: * Writing Autoconf Input:: What to put in an Autoconf input file * autoscan Invocation:: Semi-automatic ‘configure.ac’ writing * ifnames Invocation:: Listing the conditionals in source code * autoconf Invocation:: How to create configuration scripts * autoreconf Invocation:: Remaking multiple ‘configure’ scripts  File: autoconf.info, Node: Writing Autoconf Input, Next: autoscan Invocation, Up: Making configure Scripts 3.1 Writing ‘configure.ac’ ========================== To produce a ‘configure’ script for a software package, create a file called ‘configure.ac’ that contains invocations of the Autoconf macros that test the system features your package needs or can use. Autoconf macros already exist to check for many features; see *note Existing Tests::, for their descriptions. For most other features, you can use Autoconf template macros to produce custom checks; see *note Writing Tests::, for information about them. For especially tricky or specialized features, ‘configure.ac’ might need to contain some hand-crafted shell commands; see *note Portable Shell Programming: Portable Shell. The ‘autoscan’ program can give you a good start in writing ‘configure.ac’ (*note autoscan Invocation::, for more information). Previous versions of Autoconf promoted the name ‘configure.in’, which is somewhat ambiguous (the tool needed to process this file is not described by its extension), and introduces a slight confusion with ‘config.h.in’ and so on (for which ‘.in’ means “to be processed by ‘configure’”). Using ‘configure.ac’ is now preferred, while the use of ‘configure.in’ will cause warnings from ‘autoconf’. 
* Menu: * Shell Script Compiler:: Autoconf as solution of a problem * Autoconf Language:: Programming in Autoconf * Autoconf Input Layout:: Standard organization of ‘configure.ac’  File: autoconf.info, Node: Shell Script Compiler, Next: Autoconf Language, Up: Writing Autoconf Input 3.1.1 A Shell Script Compiler ----------------------------- Just as for any other computer language, in order to properly program ‘configure.ac’ in Autoconf you must understand _what_ problem the language tries to address and _how_ it does so. The problem Autoconf addresses is that the world is a mess. After all, you are using Autoconf in order to have your package compile easily on all sorts of different systems, some of them being extremely hostile. Autoconf itself bears the price for these differences: ‘configure’ must run on all those systems, and thus ‘configure’ must limit itself to their lowest common denominator of features. Naturally, you might then think of shell scripts; who needs ‘autoconf’? A set of properly written shell functions is enough to make it easy to write ‘configure’ scripts by hand. Sigh! Unfortunately, even in 2008, where shells without any function support are few and far between, there are pitfalls to avoid when making use of them. Also, finding a Bourne shell that accepts shell functions is not trivial, even though there is almost always one on interesting porting targets. So, what is really needed is some kind of compiler, ‘autoconf’, that takes an Autoconf program, ‘configure.ac’, and transforms it into a portable shell script, ‘configure’. How does ‘autoconf’ perform this task? There are two obvious possibilities: creating a brand new language or extending an existing one. The former option is attractive: all sorts of optimizations could easily be implemented in the compiler and many rigorous checks could be performed on the Autoconf program (e.g., rejecting any non-portable construct). 
Alternatively, you can extend an existing language, such as the ‘sh’ (Bourne shell) language. Autoconf does the latter: it is a layer on top of ‘sh’. It was therefore most convenient to implement ‘autoconf’ as a macro expander: a program that repeatedly performs “macro expansions” on text input, replacing macro calls with macro bodies and producing a pure ‘sh’ script in the end. Instead of implementing a dedicated Autoconf macro expander, it is natural to use an existing general-purpose macro language, such as M4, and implement the extensions as a set of M4 macros.  File: autoconf.info, Node: Autoconf Language, Next: Autoconf Input Layout, Prev: Shell Script Compiler, Up: Writing Autoconf Input 3.1.2 The Autoconf Language --------------------------- The Autoconf language differs from many other computer languages because it treats actual code the same as plain text. Whereas in C, for instance, data and instructions have different syntactic status, in Autoconf their status is rigorously the same. Therefore, we need a means to distinguish literal strings from text to be expanded: quotation. When calling macros that take arguments, there must not be any white space between the macro name and the open parenthesis. AC_INIT ([oops], [1.0]) # incorrect AC_INIT([hello], [1.0]) # good Arguments should be enclosed within the quote characters ‘[’ and ‘]’, and be separated by commas. Any leading blanks or newlines in arguments are ignored, unless they are quoted. You should always quote an argument that might contain a macro name, comma, parenthesis, or a leading blank or newline. This rule applies recursively for every macro call, including macros called from other macros. For more details on quoting rules, see *note Programming in M4::. For instance: AC_CHECK_HEADER([stdio.h], [AC_DEFINE([HAVE_STDIO_H], [1], [Define to 1 if you have .])], [AC_MSG_ERROR([sorry, can't do anything for you])]) is quoted properly. 
You may safely simplify its quotation to: AC_CHECK_HEADER([stdio.h], [AC_DEFINE([HAVE_STDIO_H], 1, [Define to 1 if you have .])], [AC_MSG_ERROR([sorry, can't do anything for you])]) because ‘1’ cannot contain a macro call. Here, the argument of ‘AC_MSG_ERROR’ must be quoted; otherwise, its comma would be interpreted as an argument separator. Also, the second and third arguments of ‘AC_CHECK_HEADER’ must be quoted, since they contain macro calls. The three arguments ‘HAVE_STDIO_H’, ‘stdio.h’, and ‘Define to 1 if you have .’ do not need quoting, but if you unwisely defined a macro with a name like ‘Define’ or ‘stdio’ then they would need quoting. Cautious Autoconf users would keep the quotes, but many Autoconf users find such precautions annoying, and would rewrite the example as follows: AC_CHECK_HEADER(stdio.h, [AC_DEFINE(HAVE_STDIO_H, 1, [Define to 1 if you have .])], [AC_MSG_ERROR([sorry, can't do anything for you])]) This is safe, so long as you adopt good naming conventions and do not define macros with names like ‘HAVE_STDIO_H’, ‘stdio’, or ‘h’. Though it is also safe here to omit the quotes around ‘Define to 1 if you have .’ this is not recommended, as message strings are more likely to inadvertently contain commas. The following example is wrong and dangerous, as it is underquoted: AC_CHECK_HEADER(stdio.h, AC_DEFINE(HAVE_STDIO_H, 1, Define to 1 if you have .), AC_MSG_ERROR([sorry, can't do anything for you])) In other cases, you may want to use text that also resembles a macro call. You must quote that text (whether just the potential problem, or the entire line) even when it is not passed as a macro argument; and you may also have to use ‘m4_pattern_allow’ (*note Forbidden Patterns::), to declare your intention that the resulting configure file will have a literal that resembles what would otherwise be reserved for a macro name. For example: dnl Simulate a possible future autoconf macro m4_define([AC_DC], [oops]) dnl Underquoted: echo "Hard rock was here! 
--AC_DC" dnl Correctly quoted: m4_pattern_allow([AC_DC]) echo "Hard rock was here! --[AC_DC]" [echo "Hard rock was here! --AC_DC"] which results in this text in ‘configure’: echo "Hard rock was here! --oops" echo "Hard rock was here! --AC_DC" echo "Hard rock was here! --AC_DC" When you use the same text in a macro argument, you must therefore have an extra quotation level (since one is stripped away by the macro substitution). In general, then, it is a good idea to _use double quoting for all literal string arguments_, either around just the problematic portions, or over the entire argument: m4_pattern_allow([AC_DC]) AC_MSG_WARN([[AC_DC] stinks --Iron Maiden]) AC_MSG_WARN([[AC_DC stinks --Iron Maiden]]) It is also possible to avoid the problematic patterns in the first place, by the use of additional escaping (either a quadrigraph, or creative shell constructs), in which case it is no longer necessary to use ‘m4_pattern_allow’: echo "Hard rock was here! --AC""_DC" AC_MSG_WARN([[AC@&t@_DC stinks --Iron Maiden]]) You are now able to understand one of the constructs of Autoconf that has been continually misunderstood... The rule of thumb is that _whenever you expect macro expansion, expect quote expansion_; i.e., expect one level of quotes to be lost. For instance: AC_COMPILE_IFELSE(AC_LANG_SOURCE([char b[10];]), [], [AC_MSG_ERROR([you lose])]) is incorrect: here, the first argument of ‘AC_LANG_SOURCE’ is ‘char b[10];’ and is expanded once, which results in ‘char b10;’; and the ‘AC_LANG_SOURCE’ is also expanded prior to being passed to ‘AC_COMPILE_IFELSE’. (There was an idiom common in Autoconf’s past to address this issue via the M4 ‘changequote’ primitive, but do not use it!) 
Let’s take a closer look: the author meant the first argument to be understood as a literal, and therefore it must be quoted twice; likewise, the intermediate ‘AC_LANG_SOURCE’ macro should be quoted once so that it is only expanded after the rest of the body of ‘AC_COMPILE_IFELSE’ is in place: AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char b[10];]])], [], [AC_MSG_ERROR([you lose])]) Voilà, you actually produce ‘char b[10];’ this time! On the other hand, descriptions (e.g., the last parameter of ‘AC_DEFINE’ or ‘AS_HELP_STRING’) are not literals—they are subject to line breaking, for example—and should not be double quoted. Even if these descriptions are short and are not actually broken, double quoting them yields weird results. Some macros take optional arguments, which this documentation represents as [ARG] (not to be confused with the quote characters). You may just leave them empty, or use ‘[]’ to make the emptiness of the argument explicit, or you may simply omit the trailing commas. The three lines below are equivalent: AC_CHECK_HEADERS([stdio.h], [], [], []) AC_CHECK_HEADERS([stdio.h],,,) AC_CHECK_HEADERS([stdio.h]) It is best to put each macro call on its own line in ‘configure.ac’. Most of the macros don’t add extra newlines; they rely on the newline after the macro call to terminate the commands. This approach makes the generated ‘configure’ script a little easier to read by not inserting lots of blank lines. It is generally safe to set shell variables on the same line as a macro call, because the shell allows assignments without intervening newlines. You can include comments in ‘configure.ac’ files by starting them with the ‘#’. For example, it is helpful to begin ‘configure.ac’ files with a line like this: # Process this file with autoconf to produce a configure script.  
File: autoconf.info, Node: Autoconf Input Layout, Prev: Autoconf Language, Up: Writing Autoconf Input 3.1.3 Standard ‘configure.ac’ Layout ------------------------------------ The order in which ‘configure.ac’ calls the Autoconf macros is not important, with a few exceptions. Every ‘configure.ac’ must contain a call to ‘AC_INIT’ before the checks, and a call to ‘AC_OUTPUT’ at the end (*note Output::). Additionally, some macros rely on other macros having been called first, because they check previously set values of some variables to decide what to do. These macros are noted in the individual descriptions (*note Existing Tests::), and they also warn you when ‘configure’ is created if they are called out of order. To encourage consistency, here is a suggested order for calling the Autoconf macros. Generally speaking, the things near the end of this list are those that could depend on things earlier in it. For example, library functions could be affected by types and libraries. Autoconf requirements ‘AC_INIT(PACKAGE, VERSION, BUG-REPORT-ADDRESS)’ information on the package checks for programs checks for libraries checks for header files checks for types checks for structures checks for compiler characteristics checks for library functions checks for system services ‘AC_CONFIG_FILES([FILE...])’ ‘AC_OUTPUT’  File: autoconf.info, Node: autoscan Invocation, Next: ifnames Invocation, Prev: Writing Autoconf Input, Up: Making configure Scripts 3.2 Using ‘autoscan’ to Create ‘configure.ac’ ============================================= The ‘autoscan’ program can help you create and/or maintain a ‘configure.ac’ file for a software package. ‘autoscan’ examines source files in the directory tree rooted at a directory given as a command line argument, or the current directory if none is given. 
It searches the source files for common portability problems and creates a file ‘configure.scan’ which is a preliminary ‘configure.ac’ for that package, and checks a possibly existing ‘configure.ac’ for completeness. When using ‘autoscan’ to create a ‘configure.ac’, you should manually examine ‘configure.scan’ before renaming it to ‘configure.ac’; it probably needs some adjustments. Occasionally, ‘autoscan’ outputs a macro in the wrong order relative to another macro, so that ‘autoconf’ produces a warning; you need to move such macros manually. Also, if you want the package to use a configuration header file, you must add a call to ‘AC_CONFIG_HEADERS’ (*note Configuration Headers::). You might also have to change or add some ‘#if’ directives to your program in order to make it work with Autoconf (*note ifnames Invocation::, for information about a program that can help with that job). When using ‘autoscan’ to maintain a ‘configure.ac’, simply consider adding its suggestions. The file ‘autoscan.log’ contains detailed information on why a macro is requested. ‘autoscan’ uses several data files (installed along with Autoconf) to determine which macros to output when it finds particular symbols in a package’s source files. These data files all have the same format: each line consists of a symbol, one or more blanks, and the Autoconf macro to output if that symbol is encountered. Lines starting with ‘#’ are comments. ‘autoscan’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Print the names of the files it examines and the potentially interesting symbols it finds in them. This output can be voluminous. ‘--debug’ ‘-d’ Don’t remove temporary files. ‘--include=DIR’ ‘-I DIR’ Append DIR to the include path. Multiple invocations accumulate. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend DIR to the include path. Multiple invocations accumulate.  
File: autoconf.info, Node: ifnames Invocation, Next: autoconf Invocation, Prev: autoscan Invocation, Up: Making configure Scripts 3.3 Using ‘ifnames’ to List Conditionals ======================================== ‘ifnames’ can help you write ‘configure.ac’ for a software package. It prints the identifiers that the package already uses in C preprocessor conditionals. If a package has already been set up to have some portability, ‘ifnames’ can thus help you figure out what its ‘configure’ needs to check for. It may help fill in some gaps in a ‘configure.ac’ generated by ‘autoscan’ (*note autoscan Invocation::). ‘ifnames’ scans all of the C source files named on the command line (or the standard input, if none are given) and writes to the standard output a sorted list of all the identifiers that appear in those files in ‘#if’, ‘#elif’, ‘#ifdef’, or ‘#ifndef’ directives. It prints each identifier on a line, followed by a space-separated list of the files in which that identifier occurs. ‘ifnames’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit.  File: autoconf.info, Node: autoconf Invocation, Next: autoreconf Invocation, Prev: ifnames Invocation, Up: Making configure Scripts 3.4 Using ‘autoconf’ to Create ‘configure’ ========================================== To create ‘configure’ from ‘configure.ac’, run the ‘autoconf’ program with no arguments. ‘autoconf’ processes ‘configure.ac’ with the M4 macro processor, using the Autoconf macros. If you give ‘autoconf’ an argument, it reads that file instead of ‘configure.ac’ and writes the configuration script to the standard output instead of to ‘configure’. If you give ‘autoconf’ the argument ‘-’, it reads from the standard input instead of ‘configure.ac’ and writes the configuration script to the standard output. The Autoconf macros are defined in several files. 
Some of the files are distributed with Autoconf; ‘autoconf’ reads them first. Then it looks for the optional file ‘acsite.m4’ in the directory that contains the distributed Autoconf macro files, and for the optional file ‘aclocal.m4’ in the current directory. Those files can contain your site’s or the package’s own Autoconf macro definitions (*note Writing Autoconf Macros::, for more information). If a macro is defined in more than one of the files that ‘autoconf’ reads, the last definition it reads overrides the earlier ones. ‘autoconf’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Report processing steps. ‘--debug’ ‘-d’ Don’t remove the temporary files. ‘--force’ ‘-f’ Remake ‘configure’ even if newer than its input files. ‘--include=DIR’ ‘-I DIR’ Append DIR to the include path. Multiple invocations accumulate. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend DIR to the include path. Multiple invocations accumulate. ‘--output=FILE’ ‘-o FILE’ Save output (script or trace) to FILE. The file ‘-’ stands for the standard output. ‘--warnings=CATEGORY[,CATEGORY...]’ ‘-WCATEGORY[,CATEGORY...]’ Enable or disable warnings related to each CATEGORY. *Note m4_warn::, for a comprehensive list of categories. Special values include: ‘all’ Enable all categories of warnings. ‘none’ Disable all categories of warnings. ‘error’ Treat all warnings as errors. ‘no-CATEGORY’ Disable warnings falling into CATEGORY. The environment variable ‘WARNINGS’ may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of ‘--warnings’, but unknown categories are silently ignored. The command line takes precedence; for instance, if ‘WARNINGS’ is set to ‘obsolete’, but ‘-Wnone’ is given on the command line, no warnings will be issued. Some categories of warnings are on by default. 
Again, for details see *note m4_warn::. ‘--trace=MACRO[:FORMAT]’ ‘-t MACRO[:FORMAT]’ Do not create the ‘configure’ script, but list the calls to MACRO according to the FORMAT. Multiple ‘--trace’ arguments can be used to list several macros. Multiple ‘--trace’ arguments for a single macro are not cumulative; instead, you should just make FORMAT as long as needed. The FORMAT is a regular string, with newlines if desired, and several special escape codes. It defaults to ‘$f:$l:$n:$%’; see *note autom4te Invocation::, for details on the FORMAT. ‘--initialization’ ‘-i’ By default, ‘--trace’ does not trace the initialization of the Autoconf macros (typically the ‘AC_DEFUN’ definitions). This results in a noticeable speedup, but can be disabled by this option. It is often necessary to check the content of a ‘configure.ac’ file, but parsing it yourself is extremely fragile and error-prone. It is suggested that you rely upon ‘--trace’ to scan ‘configure.ac’. For instance, to find the list of variables that are substituted, use: $ autoconf -t AC_SUBST configure.ac:2:AC_SUBST:ECHO_C configure.ac:2:AC_SUBST:ECHO_N configure.ac:2:AC_SUBST:ECHO_T More traces deleted The example below highlights the difference between ‘$@’, ‘$*’, and ‘$%’. 
$ cat configure.ac AC_DEFINE(This, is, [an [example]]) $ autoconf -t 'AC_DEFINE:@: $@ *: $* %: $%' @: [This],[is],[an [example]] *: This,is,an [example] %: This:is:an [example] The FORMAT gives you a lot of freedom: $ autoconf -t 'AC_SUBST:$$ac_subst{"$1"} = "$f:$l";' $ac_subst{"ECHO_C"} = "configure.ac:2"; $ac_subst{"ECHO_N"} = "configure.ac:2"; $ac_subst{"ECHO_T"} = "configure.ac:2"; More traces deleted A long SEPARATOR can be used to improve the readability of complex structures, and to ease their parsing (for instance when no single character is suitable as a separator): $ autoconf -t 'AM_MISSING_PROG:${|:::::|}*' ACLOCAL|:::::|aclocal|:::::|$missing_dir AUTOCONF|:::::|autoconf|:::::|$missing_dir AUTOMAKE|:::::|automake|:::::|$missing_dir More traces deleted  File: autoconf.info, Node: autoreconf Invocation, Prev: autoconf Invocation, Up: Making configure Scripts 3.5 Using ‘autoreconf’ to Update ‘configure’ Scripts ==================================================== Installing the various components of the GNU Build System can be tedious: running ‘autopoint’ for Gettext, ‘automake’ for ‘Makefile.in’ etc. in each directory. It may be needed either because some tools such as ‘automake’ have been updated on your system, or because some of the sources such as ‘configure.ac’ have been updated, or finally, simply in order to install the GNU Build System in a fresh tree. ‘autoreconf’ runs ‘autoconf’, ‘autoheader’, ‘aclocal’, ‘automake’, ‘libtoolize’, ‘intltoolize’, ‘gtkdocize’, and ‘autopoint’ (when appropriate) repeatedly to update the GNU Build System in the specified directories and their subdirectories (*note Subdirectories::). By default, it only remakes those files that are older than their sources. The environment variables ‘AUTOM4TE’, ‘AUTOCONF’, ‘AUTOHEADER’, ‘AUTOMAKE’, ‘ACLOCAL’, ‘AUTOPOINT’, ‘LIBTOOLIZE’, ‘INTLTOOLIZE’, ‘GTKDOCIZE’, ‘M4’, and ‘MAKE’ may be used to override the invocation of the respective tools. 
If you install a new version of some tool, you can make ‘autoreconf’ remake _all_ of the files by giving it the ‘--force’ option. *Note Automatic Remaking::, for Make rules to automatically rebuild ‘configure’ scripts when their source files change. That method handles the timestamps of configuration header templates properly, but does not pass ‘--autoconf-dir=DIR’ or ‘--localdir=DIR’. Gettext supplies the ‘autopoint’ command to add translation infrastructure to a source package. If you use ‘autopoint’, your ‘configure.ac’ should invoke ‘AM_GNU_GETTEXT’ and one of ‘AM_GNU_GETTEXT_VERSION(GETTEXT-VERSION)’ or ‘AM_GNU_GETTEXT_REQUIRE_VERSION(MIN-GETTEXT-VERSION)’. *Note Invoking the ‘autopoint’ Program: (gettext)autopoint Invocation, for further details. ‘autoreconf’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Print the name of each directory ‘autoreconf’ examines and the commands it runs. If given two or more times, pass ‘--verbose’ to subordinate tools that support it. ‘--debug’ ‘-d’ Don’t remove the temporary files. ‘--force’ ‘-f’ Consider all generated and standard auxiliary files to be obsolete. This remakes even ‘configure’ scripts and configuration headers that are newer than their input files (‘configure.ac’ and, if present, ‘aclocal.m4’). If deemed appropriate, this option triggers calls to ‘automake --force-missing’. Passing both ‘--force’ and ‘--install’ to ‘autoreconf’ will in turn undo any customizations to standard files. Note that the macro ‘AM_INIT_AUTOMAKE’ has some options which change the set of files considered to be standard. ‘--install’ ‘-i’ Install any missing standard auxiliary files in the package. By default, files are copied; this can be changed with ‘--symlink’. If deemed appropriate, this option triggers calls to ‘automake --add-missing’, ‘libtoolize’, ‘autopoint’, etc. 
‘--no-recursive’ Do not rebuild files in subdirectories to configure (see *note Subdirectories::, macro ‘AC_CONFIG_SUBDIRS’). ‘--symlink’ ‘-s’ When used with ‘--install’, install symbolic links to the missing auxiliary files instead of copying them. ‘--make’ ‘-m’ When the directories were configured, update the configuration by running ‘./config.status --recheck && ./config.status’, and then run ‘make’. ‘--include=DIR’ ‘-I DIR’ Append DIR to the include path. Multiple invocations accumulate. Passed on to ‘aclocal’, ‘autoconf’ and ‘autoheader’ internally. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend DIR to the include path. Multiple invocations accumulate. Passed on to ‘autoconf’ and ‘autoheader’ internally. ‘--warnings=CATEGORY[,CATEGORY...]’ ‘-WCATEGORY[,CATEGORY...]’ Enable or disable warnings related to each CATEGORY. *Note m4_warn::, for a comprehensive list of categories. Special values include: ‘all’ Enable all categories of warnings. ‘none’ Disable all categories of warnings. ‘error’ Treat all warnings as errors. ‘no-CATEGORY’ Disable warnings falling into CATEGORY. The environment variable ‘WARNINGS’ may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of ‘--warnings’, but unknown categories are silently ignored. The command line takes precedence; for instance, if ‘WARNINGS’ is set to ‘obsolete’, but ‘-Wnone’ is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see *note m4_warn::. If you want ‘autoreconf’ to pass flags that are not listed here on to ‘aclocal’, set ‘ACLOCAL_AMFLAGS’ in your ‘Makefile.am’. Due to a limitation in the Autoconf implementation these flags currently must be set on a single line in ‘Makefile.am’, without any backslash-newlines. Also, be aware that future Automake releases might start flagging ‘ACLOCAL_AMFLAGS’ as obsolescent, or even remove support for it.  
File: autoconf.info, Node: Setup, Next: Existing Tests, Prev: Making configure Scripts, Up: Top 4 Initialization and Output Files ********************************* Autoconf-generated ‘configure’ scripts need some information about how to initialize, such as how to find the package’s source files and about the output files to produce. The following sections describe the initialization and the creation of output files. * Menu: * Initializing configure:: Option processing etc. * Versioning:: Dealing with Autoconf versions * Notices:: Copyright, version numbers in ‘configure’ * Input:: Where Autoconf should find files * Output:: Outputting results from the configuration * Configuration Actions:: Preparing the output based on results * Configuration Files:: Creating output files * Makefile Substitutions:: Using output variables in makefiles * Configuration Headers:: Creating a configuration header file * Configuration Commands:: Running arbitrary instantiation commands * Configuration Links:: Links depending on the configuration * Subdirectories:: Configuring independent packages together * Default Prefix:: Changing the default installation prefix  File: autoconf.info, Node: Initializing configure, Next: Versioning, Up: Setup 4.1 Initializing ‘configure’ ============================ Every ‘configure’ script must call ‘AC_INIT’ before doing anything else that produces output. Calls to silent macros, such as ‘AC_DEFUN’, may also occur prior to ‘AC_INIT’, although these are generally used via ‘aclocal.m4’, since that is implicitly included before the start of ‘configure.ac’. The only other required macro is ‘AC_OUTPUT’ (*note Output::). -- Macro: AC_INIT (PACKAGE, VERSION, [BUG-REPORT], [TARNAME], [URL]) Process any command-line arguments and perform initialization and verification. Set the name of the PACKAGE and its VERSION. These are typically used in ‘--version’ support, including that of ‘configure’. 
The optional argument BUG-REPORT should be the email to which users should send bug reports. The package TARNAME differs from PACKAGE: the latter designates the full package name (e.g., ‘GNU Autoconf’), while the former is meant for distribution tar ball names (e.g., ‘autoconf’). It defaults to PACKAGE with ‘GNU ’ stripped, lower-cased, and all characters other than alphanumerics and underscores are changed to ‘-’. If provided, URL should be the home page for the package. Leading and trailing whitespace is stripped from all the arguments to ‘AC_INIT’, and interior whitespace is collapsed to a single space. This means that, for instance, if you want to put several email addresses in BUG-REPORT, you can put each one on its own line: # We keep having problems with the mail hosting for # gnomovision.example, so give people an alternative. AC_INIT([Gnomovision], [17.0.1], [ bugs@gnomovision.example or gnomo-bugs@reliable-email.example ]) The arguments to ‘AC_INIT’ may be computed by M4, when ‘autoconf’ is run. For instance, if you want to include the package’s version number in the TARNAME, but you don’t want to repeat it, you can use a helper macro: m4_define([gnomo_VERSION], [17.0.1]) AC_INIT([Gnomovision], m4_defn([gnomo_VERSION]), [bugs@gnomovision.example], [gnomo-]m4_defn([gnomo_VERSION])) This uses ‘m4_defn’ to produce the expansion of ‘gnomo_VERSION’ _as a quoted string_, so that if there happen to be any more M4 macro names in ‘gnomo_VERSION’, they will not be expanded. *Note Renaming Macros: (m4)Defn. 
Continuing this example, if you don’t want to embed the version number in ‘configure.ac’ at all, you can use ‘m4_esyscmd’ to look it up somewhere else when ‘autoconf’ is run: m4_define([gnomo_VERSION], m4_esyscmd([build-aux/git-version-gen .tarball-version])) AC_INIT([Gnomovision], m4_defn([gnomo_VERSION]), [bugs@gnomovision.example], [gnomo-]m4_defn([gnomo_VERSION])) This uses the utility script ‘git-version-gen’ to look up the package’s version in its version control metadata. This script is part of Gnulib (*note Gnulib::). The arguments to ‘AC_INIT’ are written into ‘configure’ in several different places. Therefore, we strongly recommend that you write any M4 logic in ‘AC_INIT’ arguments to be evaluated _before_ ‘AC_INIT’ itself is evaluated. For instance, in the above example, the second argument to ‘m4_define’ is _not_ quoted, so the ‘m4_esyscmd’ is evaluated only once, and ‘gnomo_VERSION’ is defined to the output of the command. If the second argument to ‘m4_define’ were quoted, ‘m4_esyscmd’ would be evaluated each time the VERSION or TARNAME arguments were written to ‘configure’, and the command would be run repeatedly. In some of the places where the arguments to ‘AC_INIT’ are used, within ‘configure’, shell evaluation cannot happen. Therefore, the arguments to ‘AC_INIT’ may _not_ be computed when ‘configure’ is run. If they contain any construct that isn’t always treated as literal by the shell (e.g. variable expansions), ‘autoconf’ will issue an error. The TARNAME argument is used to construct filenames. It should not contain wildcard characters, white space, or anything else that could be troublesome as part of a file or directory name. Some of M4’s active characters (notably parentheses, square brackets, ‘,’ and ‘#’) commonly appear in URLs and lists of email addresses. If any of these characters appear in an argument to AC_INIT, that argument will probably need to be double-quoted to avoid errors and mistranscriptions. *Note M4 Quotation::. 
The following M4 macros (e.g., ‘AC_PACKAGE_NAME’), output variables (e.g., ‘PACKAGE_NAME’), and preprocessor symbols (e.g., ‘PACKAGE_NAME’), are defined by ‘AC_INIT’: ‘AC_PACKAGE_NAME’, ‘PACKAGE_NAME’ Exactly PACKAGE. ‘AC_PACKAGE_TARNAME’, ‘PACKAGE_TARNAME’ Exactly TARNAME, possibly generated from PACKAGE. ‘AC_PACKAGE_VERSION’, ‘PACKAGE_VERSION’ Exactly VERSION. ‘AC_PACKAGE_STRING’, ‘PACKAGE_STRING’ Exactly ‘PACKAGE VERSION’. ‘AC_PACKAGE_BUGREPORT’, ‘PACKAGE_BUGREPORT’ Exactly BUG-REPORT, if one was provided. Typically an email address, or URL to a bug management web page. ‘AC_PACKAGE_URL’, ‘PACKAGE_URL’ Exactly URL, if one was provided. If URL was empty, but PACKAGE begins with ‘GNU ’, then this defaults to ‘https://www.gnu.org/software/TARNAME/’, otherwise, no URL is assumed. If your ‘configure’ script does its own option processing, it should inspect ‘$@’ or ‘$*’ immediately after calling ‘AC_INIT’, because other Autoconf macros liberally use the ‘set’ command to process strings, and this has the side effect of updating ‘$@’ and ‘$*’. However, we suggest that you use standard macros like ‘AC_ARG_ENABLE’ instead of attempting to implement your own option processing. *Note Site Configuration::.  File: autoconf.info, Node: Versioning, Next: Notices, Prev: Initializing configure, Up: Setup 4.2 Dealing with Autoconf versions ================================== The following optional macros can be used to help choose the minimum version of Autoconf that can successfully compile a given ‘configure.ac’. -- Macro: AC_PREREQ (VERSION) Ensure that a recent enough version of Autoconf is being used. If the version of Autoconf being used to create ‘configure’ is earlier than VERSION, print an error message to the standard error output and exit with failure (exit status is 63). For example: AC_PREREQ([2.71]) This macro may be used before ‘AC_INIT’. -- Macro: AC_AUTOCONF_VERSION This macro was introduced in Autoconf 2.62. 
It identifies the version of Autoconf that is currently parsing the input file, in a format suitable for ‘m4_version_compare’ (*note m4_version_compare::); in other words, for this release of Autoconf, its value is ‘2.71’. One potential use of this macro is for writing conditional fallbacks based on when a feature was added to Autoconf, rather than using ‘AC_PREREQ’ to require the newer version of Autoconf. However, remember that the Autoconf philosophy favors feature checks over version checks. You should not expand this macro directly; use ‘m4_defn([AC_AUTOCONF_VERSION])’ instead. This is because some users might have a beta version of Autoconf installed, with arbitrary letters included in its version string. This means it is possible for the version string to contain the name of a defined macro, such that expanding ‘AC_AUTOCONF_VERSION’ would trigger the expansion of that macro during rescanning, and change the version string to be different than what you intended to check.  File: autoconf.info, Node: Notices, Next: Input, Prev: Versioning, Up: Setup 4.3 Notices in ‘configure’ ========================== The following macros manage version numbers for ‘configure’ scripts. Using them is optional. -- Macro: AC_COPYRIGHT (COPYRIGHT-NOTICE) State that, in addition to the Free Software Foundation’s copyright on the Autoconf macros, parts of your ‘configure’ are covered by the COPYRIGHT-NOTICE. The COPYRIGHT-NOTICE shows up in both the head of ‘configure’ and in ‘configure --version’. -- Macro: AC_REVISION (REVISION-INFO) Copy revision stamp REVISION-INFO into the ‘configure’ script, with any dollar signs or double-quotes removed. This macro lets you put a revision stamp from ‘configure.ac’ into ‘configure’ without RCS or CVS changing it when you check in ‘configure’. That way, you can determine easily which revision of ‘configure.ac’ a particular ‘configure’ corresponds to. 
For example, this line in ‘configure.ac’: AC_REVISION([$Revision: 1.30 $]) produces this in ‘configure’: #!/bin/sh # From configure.ac Revision: 1.30  File: autoconf.info, Node: Input, Next: Output, Prev: Notices, Up: Setup 4.4 Configure Input: Source Code, Macros, and Auxiliary Files ============================================================= The following macros help you manage the contents of your source tree. -- Macro: AC_CONFIG_SRCDIR (UNIQUE-FILE-IN-SOURCE-DIR) Distinguish this package’s source directory from other source directories that might happen to exist in the file system. UNIQUE-FILE-IN-SOURCE-DIR should name a file that is unique to this package. ‘configure’ will verify that this file exists in ‘SRCDIR’, before it runs any other checks. Use of this macro is strongly recommended. It protects against people accidentally specifying the wrong directory with ‘--srcdir’. *Note configure Invocation::, for more information. Packages that use ‘aclocal’ to generate ‘aclocal.m4’ should declare where local macros can be found using ‘AC_CONFIG_MACRO_DIRS’. -- Macro: AC_CONFIG_MACRO_DIRS (DIR1 [DIR2 ... DIRN]) -- Macro: AC_CONFIG_MACRO_DIR (DIR) Specify the given directories as the location of additional local Autoconf macros. These macros are intended for use by commands like ‘autoreconf’ or ‘aclocal’ that trace macro calls; they should be called directly from ‘configure.ac’ so that tools that install macros for ‘aclocal’ can find the macros’ declarations. Tools that want to learn which directories have been selected should trace ‘AC_CONFIG_MACRO_DIR_TRACE’, which will be called once per directory. AC_CONFIG_MACRO_DIRS is the preferred form, and can be called multiple times and with multiple arguments; in such cases, directories in earlier calls are expected to be searched before directories in later calls, and directories appearing in the same call are expected to be searched in the order in which they appear in the call. 
For historical reasons, the macro AC_CONFIG_MACRO_DIR can also be used once, if it appears first, for tools such as older ‘libtool’ that weren’t prepared to handle multiple directories. For example, a usage like AC_CONFIG_MACRO_DIR([dir1]) AC_CONFIG_MACRO_DIRS([dir2]) AC_CONFIG_MACRO_DIRS([dir3 dir4]) will cause the trace of AC_CONFIG_MACRO_DIR_TRACE to appear four times, and should cause the directories to be searched in this order: ‘dir1 dir2 dir3 dir4’. Note that if you use ‘aclocal’ from an Automake release prior to 1.13 to generate ‘aclocal.m4’, you must also set ‘ACLOCAL_AMFLAGS = -I DIR1 [-I DIR2 ... -I DIRN]’ in your top-level ‘Makefile.am’. Due to a limitation in the Autoconf implementation of ‘autoreconf’, these include directives currently must be set on a single line in ‘Makefile.am’, without any backslash-newlines. Some Autoconf macros require auxiliary scripts. ‘AC_PROG_INSTALL’ and ‘AC_PROG_MKDIR_P’ (*note Particular Programs::) require a fallback implementation of ‘install’ called ‘install-sh’, and the ‘AC_CANONICAL’ macros (*note Manual Configuration::) require the system-identification scripts ‘config.sub’ and ‘config.guess’. Third-party tools, such as Automake and Libtool, may require additional auxiliary scripts. By default, ‘configure’ looks for these scripts next to itself, in ‘SRCDIR’. For convenience when working with subdirectories with their own configure scripts (*note Subdirectories::), if the scripts are not in ‘SRCDIR’ it will also look in ‘SRCDIR/..’ and ‘SRCDIR/../..’. All of the scripts must be found in the same directory. If these default locations are not adequate, or simply to reduce clutter at the top level of the source tree, packages can use ‘AC_CONFIG_AUX_DIR’ to declare where to look for auxiliary scripts. -- Macro: AC_CONFIG_AUX_DIR (DIR) Look for auxiliary scripts in DIR. Normally, DIR should be a relative path, which is taken as relative to ‘SRCDIR’. 
If DIR is an absolute path or contains shell variables, however, it is used as-is. When the goal of using ‘AC_CONFIG_AUX_DIR’ is to reduce clutter at the top level of the source tree, the conventional name for DIR is ‘build-aux’. If you need portability to DOS variants, do not name the auxiliary directory ‘aux’. *Note File System Conventions::. -- Macro: AC_REQUIRE_AUX_FILE (FILE) Declare that FILE is an auxiliary script needed by this configure script, and set the shell variable ‘ac_aux_dir’ to the directory where it can be found. The value of ‘ac_aux_dir’ is guaranteed to end with a ‘/’. Macros that need auxiliary scripts must use this macro to register each script they need. ‘configure’ checks for all the auxiliary scripts it needs on startup, and exits with an error if any are missing. ‘autoreconf’ also detects missing auxiliary scripts. When used with the ‘--install’ option, ‘autoreconf’ will try to add missing scripts to the directory specified by ‘AC_CONFIG_AUX_DIR’, or to the top level of the source tree if ‘AC_CONFIG_AUX_DIR’ was not used. It can always do this for the scripts needed by Autoconf core macros: ‘install-sh’, ‘config.sub’, and ‘config.guess’. Many other commonly-needed scripts are installed by the third-party tools that ‘autoreconf’ knows how to run, such as ‘missing’ for Automake and ‘ltmain.sh’ for Libtool. If you are using Automake, auxiliary scripts will automatically be included in the tarball created by ‘make dist’. If you are not using Automake you will need to arrange for auxiliary scripts to be included in tarballs yourself. Auxiliary scripts should normally _not_ be checked into a version control system, for the same reasons that ‘configure’ shouldn’t be. The scripts needed by Autoconf core macros can be found in ‘$(datadir)/autoconf/build-aux’ of the Autoconf installation (*note Installation Directory Variables::). ‘install-sh’ can be downloaded from . ‘config.sub’ and ‘config.guess’ can be downloaded from .  
File: autoconf.info, Node: Output, Next: Configuration Actions, Prev: Input, Up: Setup 4.5 Outputting Files ==================== Every Autoconf script, e.g., ‘configure.ac’, should finish by calling ‘AC_OUTPUT’. That is the macro that generates and runs ‘config.status’, which in turn creates the makefiles and any other files resulting from configuration. This is the only required macro besides ‘AC_INIT’ (*note Input::). -- Macro: AC_OUTPUT Generate ‘config.status’ and launch it. Call this macro once, at the end of ‘configure.ac’. ‘config.status’ performs all the configuration actions: all the output files (see *note Configuration Files::, macro ‘AC_CONFIG_FILES’), header files (see *note Configuration Headers::, macro ‘AC_CONFIG_HEADERS’), commands (see *note Configuration Commands::, macro ‘AC_CONFIG_COMMANDS’), links (see *note Configuration Links::, macro ‘AC_CONFIG_LINKS’), subdirectories to configure (see *note Subdirectories::, macro ‘AC_CONFIG_SUBDIRS’) are honored. The location of your ‘AC_OUTPUT’ invocation is the exact point where configuration actions are taken: any code afterwards is executed by ‘configure’ once ‘config.status’ was run. If you want to bind actions to ‘config.status’ itself (independently of whether ‘configure’ is being run), see *note Running Arbitrary Configuration Commands: Configuration Commands. Historically, the usage of ‘AC_OUTPUT’ was somewhat different. *Note Obsolete Macros::, for a description of the arguments that ‘AC_OUTPUT’ used to support. If you run ‘make’ in subdirectories, you should run it using the ‘make’ variable ‘MAKE’. Most versions of ‘make’ set ‘MAKE’ to the name of the ‘make’ program plus any options it was given. (But many do not include in it the values of any variables set on the command line, so those are not passed on automatically.) Some old versions of ‘make’ do not set this variable. The following macro allows you to use it even with those versions. 
-- Macro: AC_PROG_MAKE_SET If the Make command, ‘$MAKE’ if set or else ‘make’, predefines ‘$(MAKE)’, define output variable ‘SET_MAKE’ to be empty. Otherwise, define ‘SET_MAKE’ to a macro definition that sets ‘$(MAKE)’, such as ‘MAKE=make’. Calls ‘AC_SUBST’ for ‘SET_MAKE’. If you use this macro, place a line like this in each ‘Makefile.in’ that runs ‘MAKE’ on other directories: @SET_MAKE@  File: autoconf.info, Node: Configuration Actions, Next: Configuration Files, Prev: Output, Up: Setup 4.6 Performing Configuration Actions ==================================== ‘configure’ is designed so that it appears to do everything itself, but there is actually a hidden slave: ‘config.status’. ‘configure’ is in charge of examining your system, but it is ‘config.status’ that actually takes the proper actions based on the results of ‘configure’. The most typical task of ‘config.status’ is to _instantiate_ files. This section describes the common behavior of the four standard instantiating macros: ‘AC_CONFIG_FILES’, ‘AC_CONFIG_HEADERS’, ‘AC_CONFIG_COMMANDS’ and ‘AC_CONFIG_LINKS’. They all have this prototype: AC_CONFIG_ITEMS(TAG..., [COMMANDS], [INIT-CMDS]) where the arguments are: TAG... A blank-or-newline-separated list of tags, which are typically the names of the files to instantiate. You are encouraged to use literals as TAGS. In particular, you should avoid ... && my_foos="$my_foos fooo" ... && my_foos="$my_foos foooo" AC_CONFIG_ITEMS([$my_foos]) and use this instead: ... && AC_CONFIG_ITEMS([fooo]) ... && AC_CONFIG_ITEMS([foooo]) The macros ‘AC_CONFIG_FILES’ and ‘AC_CONFIG_HEADERS’ use special TAG values: they may have the form ‘OUTPUT’ or ‘OUTPUT:INPUTS’. The file OUTPUT is instantiated from its templates, INPUTS (defaulting to ‘OUTPUT.in’). ‘AC_CONFIG_FILES([Makefile:boiler/top.mk:boiler/bot.mk])’, for example, asks for the creation of the file ‘Makefile’ that contains the expansion of the output variables in the concatenation of ‘boiler/top.mk’ and ‘boiler/bot.mk’. 
The special value ‘-’ might be used to denote the standard output when used in OUTPUT, or the standard input when used in the INPUTS. You most probably don’t need to use this in ‘configure.ac’, but it is convenient when using the command line interface of ‘./config.status’, see *note config.status Invocation::, for more details. The INPUTS may be absolute or relative file names. In the latter case they are first looked for in the build tree, and then in the source tree. Input files should be text files, and a line length below 2000 bytes should be safe. COMMANDS Shell commands output literally into ‘config.status’, and associated with a tag that the user can use to tell ‘config.status’ which commands to run. The commands are run each time a TAG request is given to ‘config.status’, typically each time the file ‘TAG’ is created. The variables set during the execution of ‘configure’ are _not_ available here: you first need to set them via the INIT-CMDS. Nonetheless the following variables are pre-computed: ‘srcdir’ The name of the top source directory, assuming that the working directory is the top build directory. This is what ‘configure’’s ‘--srcdir’ option sets. ‘ac_top_srcdir’ The name of the top source directory, assuming that the working directory is the current build directory. ‘ac_top_build_prefix’ The name of the top build directory, assuming that the working directory is the current build directory. It can be empty, or else ends with a slash, so that you may concatenate it. ‘ac_srcdir’ The name of the corresponding source directory, assuming that the working directory is the current build directory. ‘tmp’ The name of a temporary directory within the build tree, which you can use if you need to create additional temporary files. The directory is cleaned up when ‘config.status’ is done or interrupted. Please use package-specific file name prefixes to avoid clashing with files that ‘config.status’ may use internally. 
The “current” directory refers to the directory (or pseudo-directory) containing the input part of TAGS. For instance, running AC_CONFIG_COMMANDS([deep/dir/out:in/in.in], [...], [...]) with ‘--srcdir=../package’ produces the following values: # Argument of --srcdir srcdir='../package' # Reversing deep/dir ac_top_build_prefix='../../' # Concatenation of $ac_top_build_prefix and srcdir ac_top_srcdir='../../../package' # Concatenation of $ac_top_srcdir and deep/dir ac_srcdir='../../../package/deep/dir' independently of ‘in/in.in’. INIT-CMDS Shell commands output _unquoted_ near the beginning of ‘config.status’, and executed each time ‘config.status’ runs (regardless of the tag). Because they are unquoted, for example, ‘$var’ is output as the value of ‘var’. INIT-CMDS is typically used by ‘configure’ to give ‘config.status’ some variables it needs to run the COMMANDS. You should be extremely cautious in your variable names: all the INIT-CMDS share the same name space and may overwrite each other in unpredictable ways. Sorry... All these macros can be called multiple times, with different TAG values, of course!  File: autoconf.info, Node: Configuration Files, Next: Makefile Substitutions, Prev: Configuration Actions, Up: Setup 4.7 Creating Configuration Files ================================ Be sure to read the previous section, *note Configuration Actions::. -- Macro: AC_CONFIG_FILES (FILE..., [CMDS], [INIT-CMDS]) Make ‘AC_OUTPUT’ create each ‘FILE’ by copying an input file (by default ‘FILE.in’), substituting the output variable values. This macro is one of the instantiating macros; see *note Configuration Actions::. *Note Makefile Substitutions::, for more information on using output variables. *Note Setting Output Variables::, for more information on creating them. This macro creates the directory that the file is in if it doesn’t exist. Usually, makefiles are created this way, but other files, such as ‘.gdbinit’, can be specified as well. 
Typical calls to ‘AC_CONFIG_FILES’ look like this: AC_CONFIG_FILES([Makefile src/Makefile man/Makefile X/Imakefile]) AC_CONFIG_FILES([autoconf], [chmod +x autoconf]) You can override an input file name by appending to FILE a colon-separated list of input files. Examples: AC_CONFIG_FILES([Makefile:boiler/top.mk:boiler/bot.mk] [lib/Makefile:boiler/lib.mk]) Doing this allows you to keep your file names acceptable to DOS variants, or to prepend and/or append boilerplate to the file. The FILE names should not contain shell metacharacters. *Note Special Chars in Variables::.  File: autoconf.info, Node: Makefile Substitutions, Next: Configuration Headers, Prev: Configuration Files, Up: Setup 4.8 Substitutions in Makefiles ============================== Each subdirectory in a distribution that contains something to be compiled or installed should come with a file ‘Makefile.in’, from which ‘configure’ creates a file ‘Makefile’ in that directory. To create ‘Makefile’, ‘configure’ performs a simple variable substitution, replacing occurrences of ‘@VARIABLE@’ in ‘Makefile.in’ with the value that ‘configure’ has determined for that variable. Variables that are substituted into output files in this way are called “output variables”. They are ordinary shell variables that are set in ‘configure’. To make ‘configure’ substitute a particular variable into the output files, the macro ‘AC_SUBST’ must be called with that variable name as an argument. Any occurrences of ‘@VARIABLE@’ for other variables are left unchanged. *Note Setting Output Variables::, for more information on creating output variables with ‘AC_SUBST’. A software package that uses a ‘configure’ script should be distributed with a file ‘Makefile.in’, but no makefile; that way, the user has to properly configure the package for the local system before compiling it. *Note Makefile Conventions: (standards)Makefile Conventions, for more information on what to put in makefiles. 
* Menu: * Preset Output Variables:: Output variables that are always set * Installation Directory Variables:: Other preset output variables * Changed Directory Variables:: Warnings about ‘datarootdir’ * Build Directories:: Supporting multiple concurrent compiles * Automatic Remaking:: Makefile rules for configuring  File: autoconf.info, Node: Preset Output Variables, Next: Installation Directory Variables, Up: Makefile Substitutions 4.8.1 Preset Output Variables ----------------------------- Some output variables are preset by the Autoconf macros. Some of the Autoconf macros set additional output variables, which are mentioned in the descriptions for those macros. *Note Output Variable Index::, for a complete list of output variables. *Note Installation Directory Variables::, for the list of the preset ones related to installation directories. Below are listed the other preset ones, many of which are precious variables (*note Setting Output Variables::, ‘AC_ARG_VAR’). The preset variables which are available during ‘config.status’ (*note Configuration Actions::) may also be used during ‘configure’ tests. For example, it is permissible to reference ‘$srcdir’ when constructing a list of directories to pass via the ‘-I’ option during a compiler feature check. When used in this manner, coupled with the fact that ‘configure’ is always run from the top build directory, it is sufficient to use just ‘$srcdir’ instead of ‘$top_srcdir’. -- Variable: CFLAGS Debugging and optimization options for the C compiler. If it is not set in the environment when ‘configure’ runs, the default value is set when you call ‘AC_PROG_CC’ (or empty if you don’t). ‘configure’ uses this variable when compiling or linking programs to test for C features. If a compiler option affects only the behavior of the preprocessor (e.g., ‘-DNAME’), it should be put into ‘CPPFLAGS’ instead. If it affects only the linker (e.g., ‘-LDIRECTORY’), it should be put into ‘LDFLAGS’ instead. 
If it affects only the compiler proper, ‘CFLAGS’ is the natural home for it. If an option affects multiple phases of the compiler, though, matters get tricky: • If an option selects a 32-bit or 64-bit build on a bi-arch system, it must be put directly into ‘CC’, e.g., ‘CC='gcc -m64'’. This is necessary for ‘config.guess’ to work right. • Otherwise one approach is to put the option into ‘CC’. Another is to put it into both ‘CPPFLAGS’ and ‘LDFLAGS’, but not into ‘CFLAGS’. However, remember that some ‘Makefile’ variables are reserved by the GNU Coding Standards for the use of the “user”—the person building the package. For instance, ‘CFLAGS’ is one such variable. Sometimes package developers are tempted to set user variables such as ‘CFLAGS’ because it appears to make their job easier. However, the package itself should never set a user variable, particularly not to include switches that are required for proper compilation of the package. Since these variables are documented as being for the package builder, that person rightfully expects to be able to override any of these variables at build time. If the package developer needs to add switches without interfering with the user, the proper way to do that is to introduce an additional variable. Automake makes this easy by introducing ‘AM_CFLAGS’ (*note (automake)Flag Variables Ordering::), but the concept is the same even if Automake is not used. -- Variable: configure_input A comment saying that the file was generated automatically by ‘configure’ and giving the name of the input file. ‘AC_OUTPUT’ adds a comment line containing this variable to the top of every makefile it creates. For other files, you should reference this variable in a comment at the top of each input file. For example, an input shell script should begin like this: #!/bin/sh # @configure_input@ The presence of that line also reminds people editing the file that it needs to be processed by ‘configure’ in order to be used. 
-- Variable: CPPFLAGS Preprocessor options for the C, C++, Objective C, and Objective C++ preprocessors and compilers. If it is not set in the environment when ‘configure’ runs, the default value is empty. ‘configure’ uses this variable when preprocessing or compiling programs to test for C, C++, Objective C, and Objective C++ features. This variable’s contents should contain options like ‘-I’, ‘-D’, and ‘-U’ that affect only the behavior of the preprocessor. Please see the explanation of ‘CFLAGS’ for what you can do if an option affects other phases of the compiler as well. Currently, ‘configure’ always links as part of a single invocation of the compiler that also preprocesses and compiles, so it uses this variable also when linking programs. However, it is unwise to depend on this behavior because the GNU Coding Standards do not require it and many packages do not use ‘CPPFLAGS’ when linking programs. *Note Special Chars in Variables::, for limitations that ‘CPPFLAGS’ might run into. -- Variable: CXXFLAGS Debugging and optimization options for the C++ compiler. It acts like ‘CFLAGS’, but for C++ instead of C. -- Variable: DEFS ‘-D’ options to pass to the C compiler. If ‘AC_CONFIG_HEADERS’ is called, ‘configure’ replaces ‘@DEFS@’ with ‘-DHAVE_CONFIG_H’ instead (*note Configuration Headers::). This variable is not defined while ‘configure’ is performing its tests, only when creating the output files. *Note Setting Output Variables::, for how to check the results of previous tests. -- Variable: ECHO_C -- Variable: ECHO_N -- Variable: ECHO_T How does one suppress the trailing newline from ‘echo’ for question-answer message pairs? These variables provide a way: echo $ECHO_N "And the winner is... $ECHO_C" sleep 100000000000 echo "${ECHO_T}dead." Some old and uncommon ‘echo’ implementations offer no means to achieve this, in which case ‘ECHO_T’ is set to tab. You might not want to use it. 
-- Variable: ERLCFLAGS Debugging and optimization options for the Erlang compiler. If it is not set in the environment when ‘configure’ runs, the default value is empty. ‘configure’ uses this variable when compiling programs to test for Erlang features. -- Variable: FCFLAGS Debugging and optimization options for the Fortran compiler. If it is not set in the environment when ‘configure’ runs, the default value is set when you call ‘AC_PROG_FC’ (or empty if you don’t). ‘configure’ uses this variable when compiling or linking programs to test for Fortran features. -- Variable: FFLAGS Debugging and optimization options for the Fortran 77 compiler. If it is not set in the environment when ‘configure’ runs, the default value is set when you call ‘AC_PROG_F77’ (or empty if you don’t). ‘configure’ uses this variable when compiling or linking programs to test for Fortran 77 features. -- Variable: LDFLAGS Options for the linker. If it is not set in the environment when ‘configure’ runs, the default value is empty. ‘configure’ uses this variable when linking programs to test for C, C++, Objective C, Objective C++, Fortran, and Go features. This variable’s contents should contain options like ‘-s’ and ‘-L’ that affect only the behavior of the linker. Please see the explanation of ‘CFLAGS’ for what you can do if an option also affects other phases of the compiler. Don’t use this variable to pass library names (‘-l’) to the linker; use ‘LIBS’ instead. -- Variable: LIBS ‘-l’ options to pass to the linker. The default value is empty, but some Autoconf macros may prepend extra libraries to this variable if those libraries are found and provide necessary functions, see *note Libraries::. ‘configure’ uses this variable when linking programs to test for C, C++, Objective C, Objective C++, Fortran, and Go features. -- Variable: OBJCFLAGS Debugging and optimization options for the Objective C compiler. It acts like ‘CFLAGS’, but for Objective C instead of C. 
-- Variable: OBJCXXFLAGS Debugging and optimization options for the Objective C++ compiler. It acts like ‘CXXFLAGS’, but for Objective C++ instead of C++. -- Variable: GOFLAGS Debugging and optimization options for the Go compiler. It acts like ‘CFLAGS’, but for Go instead of C. -- Variable: builddir Rigorously equal to ‘.’. Added for symmetry only. -- Variable: abs_builddir Absolute name of ‘builddir’. -- Variable: top_builddir The relative name of the top level of the current build tree. In the top-level directory, this is the same as ‘builddir’. -- Variable: top_build_prefix The relative name of the top level of the current build tree with final slash if nonempty. This is the same as ‘top_builddir’, except that it contains zero or more runs of ‘../’, so it should not be appended with a slash for concatenation. This helps for ‘make’ implementations that otherwise do not treat ‘./file’ and ‘file’ as equal in the top-level build directory. -- Variable: abs_top_builddir Absolute name of ‘top_builddir’. -- Variable: srcdir The name of the directory that contains the source code for that makefile. -- Variable: abs_srcdir Absolute name of ‘srcdir’. -- Variable: top_srcdir The name of the top-level source code directory for the package. In the top-level directory, this is the same as ‘srcdir’. -- Variable: abs_top_srcdir Absolute name of ‘top_srcdir’.  File: autoconf.info, Node: Installation Directory Variables, Next: Changed Directory Variables, Prev: Preset Output Variables, Up: Makefile Substitutions 4.8.2 Installation Directory Variables -------------------------------------- The following variables specify the directories for package installation, see *note Variables for Installation Directories: (standards)Directory Variables, for more information. Each variable corresponds to an argument of ‘configure’; trailing slashes are stripped so that expressions such as ‘${prefix}/lib’ expand with only one slash between directory names. 
See the end of this section for details on when and how to use these variables. -- Variable: bindir The directory for installing executables that users run. -- Variable: datadir The directory for installing idiosyncratic read-only architecture-independent data. -- Variable: datarootdir The root of the directory tree for read-only architecture-independent data files. -- Variable: docdir The directory for installing documentation files (other than Info and man). -- Variable: dvidir The directory for installing documentation files in DVI format. -- Variable: exec_prefix The installation prefix for architecture-dependent files. By default it’s the same as ‘prefix’. You should avoid installing anything directly to ‘exec_prefix’. However, the default value for directories containing architecture-dependent files should be relative to ‘exec_prefix’. -- Variable: htmldir The directory for installing HTML documentation. -- Variable: includedir The directory for installing C header files. -- Variable: infodir The directory for installing documentation in Info format. -- Variable: libdir The directory for installing object code libraries. -- Variable: libexecdir The directory for installing executables that other programs run. -- Variable: localedir The directory for installing locale-dependent but architecture-independent data, such as message catalogs. This directory usually has a subdirectory per locale. -- Variable: localstatedir The directory for installing modifiable single-machine data. Content in this directory typically survives a reboot. -- Variable: runstatedir The directory for installing temporary modifiable single-machine data. Content in this directory survives as long as the process is running (such as pid files), as contrasted with ‘/tmp’ that may be periodically cleaned. Conversely, this directory is typically cleaned on a reboot. By default, this is a subdirectory of ‘localstatedir’. 
-- Variable: mandir The top-level directory for installing documentation in man format. -- Variable: oldincludedir The directory for installing C header files for non-GCC compilers. -- Variable: pdfdir The directory for installing PDF documentation. -- Variable: prefix The common installation prefix for all files. If ‘exec_prefix’ is defined to a different value, ‘prefix’ is used only for architecture-independent files. -- Variable: psdir The directory for installing PostScript documentation. -- Variable: sbindir The directory for installing executables that system administrators run. -- Variable: sharedstatedir The directory for installing modifiable architecture-independent data. -- Variable: sysconfdir The directory for installing read-only single-machine data. Most of these variables have values that rely on ‘prefix’ or ‘exec_prefix’. It is deliberate that the directory output variables keep them unexpanded: typically ‘@datarootdir@’ is replaced by ‘${prefix}/share’, not ‘/usr/local/share’, and ‘@datadir@’ is replaced by ‘${datarootdir}’. This behavior is mandated by the GNU Coding Standards, so that when the user runs: ‘make’ she can still specify a different prefix from the one specified to ‘configure’, in which case, if needed, the package should hard code dependencies corresponding to the make-specified prefix. ‘make install’ she can specify a different installation location, in which case the package _must_ still depend on the location which was compiled in (i.e., never recompile when ‘make install’ is run). This is an extremely important feature, as many people may decide to install all the files of a package grouped together, and then install links from the final locations to there. In order to support these features, it is essential that ‘datarootdir’ remains defined as ‘${prefix}/share’, so that its value can be expanded based on the current value of ‘prefix’. A corollary is that you should not use these variables except in makefiles. 
For instance, instead of trying to evaluate ‘datadir’ in ‘configure’ and hard-coding it in makefiles using e.g., ‘AC_DEFINE_UNQUOTED([DATADIR], ["$datadir"], [Data directory.])’, you should add ‘-DDATADIR='$(datadir)'’ to your makefile’s definition of ‘CPPFLAGS’ (‘AM_CPPFLAGS’ if you are also using Automake). Similarly, you should not rely on ‘AC_CONFIG_FILES’ to replace ‘bindir’ and friends in your shell scripts and other files; instead, let ‘make’ manage their replacement. For instance Autoconf ships templates of its shell scripts ending with ‘.in’, and uses a makefile snippet similar to the following to build scripts like ‘autoheader’ and ‘autom4te’: edit = sed \ -e 's|@bindir[@]|$(bindir)|g' \ -e 's|@pkgdatadir[@]|$(pkgdatadir)|g' \ -e 's|@prefix[@]|$(prefix)|g' autoheader autom4te: Makefile rm -f $@ $@.tmp srcdir=''; \ test -f ./$@.in || srcdir=$(srcdir)/; \ $(edit) $${srcdir}$@.in >$@.tmp chmod +x $@.tmp chmod a-w $@.tmp mv $@.tmp $@ autoheader: $(srcdir)/autoheader.in autom4te: $(srcdir)/autom4te.in Some details are noteworthy: ‘@bindir[@]’ The brackets prevent ‘configure’ from replacing ‘@bindir@’ in the Sed expression itself. Brackets are preferable to a backslash here, since Posix says ‘\@’ is not portable. ‘$(bindir)’ Don’t use ‘@bindir@’! Use the matching makefile variable instead. ‘$(pkgdatadir)’ The example takes advantage of the variable ‘$(pkgdatadir)’ provided by Automake; it is equivalent to ‘$(datadir)/$(PACKAGE)’. ‘/’ Don’t use ‘/’ in the Sed expressions that replace file names since most likely the variables you use, such as ‘$(bindir)’, contain ‘/’. Use a shell metacharacter instead, such as ‘|’. special characters File names, file name components, and the value of ‘VPATH’ should not contain shell metacharacters or white space. *Note Special Chars in Variables::. dependency on ‘Makefile’ Since ‘edit’ uses values that depend on the configuration specific values (‘prefix’, etc.) 
and not only on ‘VERSION’ and so forth, the output depends on ‘Makefile’, not ‘configure.ac’. ‘$@’ The main rule is generic, and uses ‘$@’ extensively to avoid the need for multiple copies of the rule. Separated dependencies and single suffix rules You can’t use them! The above snippet cannot be (portably) rewritten as: autoconf autoheader: Makefile .in: rm -f $@ $@.tmp $(edit) $< >$@.tmp chmod +x $@.tmp mv $@.tmp $@ *Note Single Suffix Rules::, for details. ‘$(srcdir)’ Be sure to specify the name of the source directory, otherwise the package won’t support separated builds. For the more specific installation of Erlang libraries, the following variables are defined: -- Variable: ERLANG_INSTALL_LIB_DIR The common parent directory of Erlang library installation directories. This variable is set by calling the ‘AC_ERLANG_SUBST_INSTALL_LIB_DIR’ macro in ‘configure.ac’. -- Variable: ERLANG_INSTALL_LIB_DIR_LIBRARY The installation directory for Erlang library LIBRARY. This variable is set by using the ‘AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR’ macro in ‘configure.ac’. *Note Erlang Libraries::, for details.  File: autoconf.info, Node: Changed Directory Variables, Next: Build Directories, Prev: Installation Directory Variables, Up: Makefile Substitutions 4.8.3 Changed Directory Variables --------------------------------- In Autoconf 2.60, the set of directory variables has changed, and the defaults of some variables have been adjusted (*note Installation Directory Variables::) to changes in the GNU Coding Standards. Notably, ‘datadir’, ‘infodir’, and ‘mandir’ are now expressed in terms of ‘datarootdir’. If you are upgrading from an earlier Autoconf version, you may need to adjust your files to ensure that the directory variables are substituted correctly (*note Defining Directories::), and that a definition of ‘datarootdir’ is in place. For example, in a ‘Makefile.in’, adding datarootdir = @datarootdir@ is usually sufficient. 
If you use Automake to create ‘Makefile.in’, it will add this for you. To help with the transition, Autoconf warns about files that seem to use ‘datarootdir’ without defining it. In some cases, it then expands the value of ‘$datarootdir’ in substitutions of the directory variables. The following example shows such a warning: $ cat configure.ac AC_INIT AC_CONFIG_FILES([Makefile]) AC_OUTPUT $ cat Makefile.in prefix = @prefix@ datadir = @datadir@ $ autoconf $ configure configure: creating ./config.status config.status: creating Makefile config.status: WARNING: Makefile.in seems to ignore the --datarootdir setting $ cat Makefile prefix = /usr/local datadir = ${prefix}/share Usually one can easily change the file to accommodate both older and newer Autoconf releases: $ cat Makefile.in prefix = @prefix@ datarootdir = @datarootdir@ datadir = @datadir@ $ configure configure: creating ./config.status config.status: creating Makefile $ cat Makefile prefix = /usr/local datarootdir = ${prefix}/share datadir = ${datarootdir} In some cases, however, the checks may not be able to detect that a suitable initialization of ‘datarootdir’ is in place, or they may fail to detect that such an initialization is necessary in the output file. If, after auditing your package, there are still spurious ‘configure’ warnings about ‘datarootdir’, you may add the line AC_DEFUN([AC_DATAROOTDIR_CHECKED]) to your ‘configure.ac’ to disable the warnings. This is an exception to the usual rule that you should not define a macro whose name begins with ‘AC_’ (*note Macro Names::).  File: autoconf.info, Node: Build Directories, Next: Automatic Remaking, Prev: Changed Directory Variables, Up: Makefile Substitutions 4.8.4 Build Directories ----------------------- You can support compiling a software package for several architectures simultaneously from the same copy of the source code. The object files for each architecture are kept in their own directory. 
To support doing this, ‘make’ uses the ‘VPATH’ variable to find the files that are in the source directory. GNU Make can do this. Most other recent ‘make’ programs can do this as well, though they may have difficulties and it is often simpler to recommend GNU ‘make’ (*note VPATH and Make::). Older ‘make’ programs do not support ‘VPATH’; when using them, the source code must be in the same directory as the object files. If you are using GNU Automake, the remaining details in this section are already covered for you, based on the contents of your ‘Makefile.am’. But if you are using Autoconf in isolation, then supporting ‘VPATH’ requires the following in your ‘Makefile.in’: srcdir = @srcdir@ VPATH = @srcdir@ Do not set ‘VPATH’ to the value of another variable (*note Variables listed in VPATH::). ‘configure’ substitutes the correct value for ‘srcdir’ when it produces ‘Makefile’. Do not use the ‘make’ variable ‘$<’, which expands to the file name of the file in the source directory (found with ‘VPATH’), except in implicit rules. (An implicit rule is one such as ‘.c.o’, which tells how to create a ‘.o’ file from a ‘.c’ file.) Some versions of ‘make’ do not set ‘$<’ in explicit rules; they expand it to an empty value. Instead, Make command lines should always refer to source files by prefixing them with ‘$(srcdir)/’. It’s safer to quote the source directory name, in case it contains characters that are special to the shell. Because ‘$(srcdir)’ is expanded by Make, single-quoting works and is safer than double-quoting. For example: time.info: time.texinfo $(MAKEINFO) '$(srcdir)/time.texinfo'  File: autoconf.info, Node: Automatic Remaking, Prev: Build Directories, Up: Makefile Substitutions 4.8.5 Automatic Remaking ------------------------ You can put rules like the following in the top-level ‘Makefile.in’ for a package to automatically update the configuration information when you change the configuration files. 
This example includes all of the optional files, such as ‘aclocal.m4’ and those related to configuration header files. Omit from the ‘Makefile.in’ rules for any of these files that your package does not use. The ‘$(srcdir)/’ prefix is included because of limitations in the ‘VPATH’ mechanism. The ‘stamp-’ files are necessary because the timestamps of ‘config.h.in’ and ‘config.h’ are not changed if remaking them does not change their contents. This feature avoids unnecessary recompilation. You should include the file ‘stamp-h.in’ in your package’s distribution, so that ‘make’ considers ‘config.h.in’ up to date. Don’t use ‘touch’ (*note Limitations of Usual Tools: touch.); instead, use ‘echo’ (using ‘date’ would cause needless differences, hence CVS conflicts, etc.). $(srcdir)/configure: configure.ac aclocal.m4 cd '$(srcdir)' && autoconf # autoheader might not change config.h.in, so touch a stamp file. $(srcdir)/config.h.in: stamp-h.in ; $(srcdir)/stamp-h.in: configure.ac aclocal.m4 cd '$(srcdir)' && autoheader echo timestamp > '$(srcdir)/stamp-h.in' config.h: stamp-h ; stamp-h: config.h.in config.status ./config.status Makefile: Makefile.in config.status ./config.status config.status: configure ./config.status --recheck (Be careful if you copy these lines directly into your makefile, as you need to convert the indented lines to start with the tab character.) In addition, you should use AC_CONFIG_FILES([stamp-h], [echo timestamp > stamp-h]) so ‘config.status’ ensures that ‘config.h’ is considered up to date. *Note Output::, for more information about ‘AC_OUTPUT’. *Note config.status Invocation::, for more examples of handling configuration-related dependencies.  
File: autoconf.info, Node: Configuration Headers, Next: Configuration Commands, Prev: Makefile Substitutions, Up: Setup 4.9 Configuration Header Files ============================== When a package contains more than a few tests that define C preprocessor symbols, the command lines to pass ‘-D’ options to the compiler can get quite long. This causes two problems. One is that the ‘make’ output is hard to visually scan for errors. More seriously, the command lines can exceed the length limits of some operating systems. As an alternative to passing ‘-D’ options to the compiler, ‘configure’ scripts can create a C header file containing ‘#define’ directives. The ‘AC_CONFIG_HEADERS’ macro selects this kind of output. Though it can be called anywhere between ‘AC_INIT’ and ‘AC_OUTPUT’, it is customary to call it right after ‘AC_INIT’. The package should ‘#include’ the configuration header file before any other header files, to prevent inconsistencies in declarations (for example, if it redefines ‘const’, or if it defines a macro like ‘_FILE_OFFSET_BITS’ that affects the behavior of system headers). Note that it is okay to only include ‘config.h’ from ‘.c’ files; the project’s ‘.h’ files can rely on ‘config.h’ already being included first by the corresponding ‘.c’ file. To provide for VPATH builds, remember to pass the C compiler a ‘-I.’ option (or ‘-I..’; whichever directory contains ‘config.h’). Even if you use ‘#include "config.h"’, the preprocessor searches only the directory of the currently read file, i.e., the source directory, not the build directory. With the appropriate ‘-I’ option, you can use ‘#include <config.h>’. Actually, it’s a good habit to use it, because in the rare case when the source directory contains another ‘config.h’, the build directory should be searched first. -- Macro: AC_CONFIG_HEADERS (HEADER ..., [CMDS], [INIT-CMDS]) This macro is one of the instantiating macros; see *note Configuration Actions::. 
Make ‘AC_OUTPUT’ create the file(s) in the blank-or-newline-separated list HEADER containing C preprocessor ‘#define’ statements, and replace ‘@DEFS@’ in generated files with ‘-DHAVE_CONFIG_H’ instead of the value of ‘DEFS’. The usual name for HEADER is ‘config.h’; HEADER should not contain shell metacharacters. *Note Special Chars in Variables::. If HEADER already exists and its contents are identical to what ‘AC_OUTPUT’ would put in it, it is left alone. Doing this allows making some changes in the configuration without needlessly causing object files that depend on the header file to be recompiled. Usually the input file is named ‘HEADER.in’; however, you can override the input file name by appending to HEADER a colon-separated list of input files. For example, you might need to make the input file name acceptable to DOS variants: AC_CONFIG_HEADERS([config.h:config.hin]) -- Macro: AH_HEADER This macro is defined as the name of the first declared config header and undefined if no config headers have been declared up to this point. A third-party macro may, for example, require use of a config header without invoking AC_CONFIG_HEADERS twice, like this: AC_CONFIG_COMMANDS_PRE( [m4_ifndef([AH_HEADER], [AC_CONFIG_HEADERS([config.h])])]) *Note Configuration Actions::, for more details on HEADER. * Menu: * Header Templates:: Input for the configuration headers * autoheader Invocation:: How to create configuration templates * Autoheader Macros:: How to specify CPP templates  File: autoconf.info, Node: Header Templates, Next: autoheader Invocation, Up: Configuration Headers 4.9.1 Configuration Header Templates ------------------------------------ Your distribution should contain a template file that looks as you want the final header file to look, including comments, with ‘#undef’ statements which are used as hooks. 
For example, suppose your ‘configure.ac’ makes these calls: AC_CONFIG_HEADERS([conf.h]) AC_CHECK_HEADERS([unistd.h]) Then you could have code like the following in ‘conf.h.in’. The ‘conf.h’ created by ‘configure’ defines ‘HAVE_UNISTD_H’ to 1, if and only if the system has ‘unistd.h’. /* Define as 1 if you have unistd.h. */ #undef HAVE_UNISTD_H The format of the template file is stricter than what the C preprocessor is required to accept. A directive line should contain only whitespace, ‘#undef’, and ‘HAVE_UNISTD_H’. The use of ‘#define’ instead of ‘#undef’, or of comments on the same line as ‘#undef’, is strongly discouraged. Each hook should only be listed once. Other preprocessor lines, such as ‘#ifdef’ or ‘#include’, are copied verbatim from the template into the generated header. Since it is a tedious task to keep a template header up to date, you may use ‘autoheader’ to generate it, see *note autoheader Invocation::. During the instantiation of the header, each ‘#undef’ line in the template file for each symbol defined by ‘AC_DEFINE’ is changed to an appropriate ‘#define’. If the corresponding ‘AC_DEFINE’ has not been executed during the ‘configure’ run, the ‘#undef’ line is commented out. (This is important, e.g., for ‘_POSIX_SOURCE’: on many systems, it can be implicitly defined by the compiler, and undefining it in the header would then break compilation of subsequent headers.) Currently, _all_ remaining ‘#undef’ lines in the header template are commented out, whether or not there was a corresponding ‘AC_DEFINE’ for the macro name; but this behavior is not guaranteed for future releases of Autoconf. Generally speaking, since you should not use ‘#define’, and you cannot guarantee whether a ‘#undef’ directive in the header template will be converted to a ‘#define’ or commented out in the generated header file, the template file cannot be used for conditional definition effects. 
Consequently, if you need to use the construct #ifdef THIS # define THAT #endif you must place it outside of the template. If you absolutely need to hook it to the config header itself, please put the directives in a separate file, and ‘#include’ that file from the config header template. If you are using ‘autoheader’, you would probably use ‘AH_BOTTOM’ to append the ‘#include’ directive.  File: autoconf.info, Node: autoheader Invocation, Next: Autoheader Macros, Prev: Header Templates, Up: Configuration Headers 4.9.2 Using ‘autoheader’ to Create ‘config.h.in’ ------------------------------------------------ The ‘autoheader’ program can create a template file of C ‘#define’ statements for ‘configure’ to use. It searches for the first invocation of ‘AC_CONFIG_HEADERS’ in ‘configure’ sources to determine the name of the template. (If the first call of ‘AC_CONFIG_HEADERS’ specifies more than one input file name, ‘autoheader’ uses the first one.) It is recommended that only one input file is used. If you want to append boilerplate code, it is preferable to use ‘AH_BOTTOM([#include <conf_post.h>])’. File ‘conf_post.h’ is not processed during the configuration then, which makes things clearer. Similarly, ‘AH_TOP’ can be used to prepend boilerplate code. In order to do its job, ‘autoheader’ needs you to document all of the symbols that you might use. Typically this is done via an ‘AC_DEFINE’ or ‘AC_DEFINE_UNQUOTED’ call whose first argument is a literal symbol and whose third argument describes the symbol (*note Defining Symbols::). Alternatively, you can use ‘AH_TEMPLATE’ (*note Autoheader Macros::), or you can supply a suitable input file for a subsequent configuration header file. Symbols defined by Autoconf’s builtin tests are already documented properly; you need to document only those that you define yourself. 
You might wonder why ‘autoheader’ is needed: after all, why would ‘configure’ need to “patch” a ‘config.h.in’ to produce a ‘config.h’ instead of just creating ‘config.h’ from scratch? Well, when everything rocks, the answer is just that we are wasting our time maintaining ‘autoheader’: generating ‘config.h’ directly is all that is needed. When things go wrong, however, you’ll be thankful for the existence of ‘autoheader’. The fact that the symbols are documented is important in order to _check_ that ‘config.h’ makes sense. The fact that there is a well-defined list of symbols that should be defined (or not) is also important for people who are porting packages to environments where ‘configure’ cannot be run: they just have to _fill in the blanks_. But let’s come back to the point: the invocation of ‘autoheader’... If you give ‘autoheader’ an argument, it uses that file instead of ‘configure.ac’ and writes the header file to the standard output instead of to ‘config.h.in’. If you give ‘autoheader’ an argument of ‘-’, it reads the standard input instead of ‘configure.ac’ and writes the header file to the standard output. ‘autoheader’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Report processing steps. ‘--debug’ ‘-d’ Don’t remove the temporary files. ‘--force’ ‘-f’ Remake the template file even if newer than its input files. ‘--include=DIR’ ‘-I DIR’ Append DIR to the include path. Multiple invocations accumulate. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend DIR to the include path. Multiple invocations accumulate. ‘--warnings=CATEGORY[,CATEGORY...]’ ‘-WCATEGORY[,CATEGORY...]’ Enable or disable warnings related to each CATEGORY. *Note m4_warn::, for a comprehensive list of categories. Special values include: ‘all’ Enable all categories of warnings. ‘none’ Disable all categories of warnings. ‘error’ Treat all warnings as errors. 
‘no-CATEGORY’ Disable warnings falling into CATEGORY. The environment variable ‘WARNINGS’ may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of ‘--warnings’, but unknown categories are silently ignored. The command line takes precedence; for instance, if ‘WARNINGS’ is set to ‘obsolete’, but ‘-Wnone’ is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see *note m4_warn::.  File: autoconf.info, Node: Autoheader Macros, Prev: autoheader Invocation, Up: Configuration Headers 4.9.3 Autoheader Macros ----------------------- ‘autoheader’ scans ‘configure.ac’ and figures out which C preprocessor symbols it might define. It knows how to generate templates for symbols defined by ‘AC_CHECK_HEADERS’, ‘AC_CHECK_FUNCS’ etc., but if you ‘AC_DEFINE’ any additional symbol, you must define a template for it. If there are missing templates, ‘autoheader’ fails with an error message. The template for a SYMBOL is created by ‘autoheader’ from the DESCRIPTION argument to an ‘AC_DEFINE’; see *note Defining Symbols::. For special needs, you can use the following macros. -- Macro: AH_TEMPLATE (KEY, DESCRIPTION) Tell ‘autoheader’ to generate a template for KEY. This macro generates standard templates just like ‘AC_DEFINE’ when a DESCRIPTION is given. For example: AH_TEMPLATE([NULL_DEVICE], [Name of the file to open to get a null file, or a data sink.]) generates the following template, with the description properly justified. /* Name of the file to open to get a null file, or a data sink. */ #undef NULL_DEVICE -- Macro: AH_VERBATIM (KEY, TEMPLATE) Tell ‘autoheader’ to include the TEMPLATE as-is in the header template file. This TEMPLATE is associated with the KEY, which is used to sort all the different templates and guarantee their uniqueness. It should be a symbol that can be defined via ‘AC_DEFINE’.
-- Macro: AH_TOP (TEXT) Include TEXT at the top of the header template file. -- Macro: AH_BOTTOM (TEXT) Include TEXT at the bottom of the header template file. Please note that TEXT gets included “verbatim” to the template file, not to the resulting config header, so it can easily get mangled when the template is processed. There is rarely a need for something other than AH_BOTTOM([#include <conf_post.h>])  File: autoconf.info, Node: Configuration Commands, Next: Configuration Links, Prev: Configuration Headers, Up: Setup 4.10 Running Arbitrary Configuration Commands ============================================= You can execute arbitrary commands before, during, and after ‘config.status’ is run. The three following macros accumulate the commands to run when they are called multiple times. ‘AC_CONFIG_COMMANDS’ replaces the obsolete macro ‘AC_OUTPUT_COMMANDS’; see *note Obsolete Macros::, for details. -- Macro: AC_CONFIG_COMMANDS (TAG..., [CMDS], [INIT-CMDS]) Specify additional shell commands to run at the end of ‘config.status’, and shell commands to initialize any variables from ‘configure’. Associate the commands with TAG. Since typically the CMDS create a file, TAG should naturally be the name of that file. If needed, the directory hosting TAG is created. The TAG should not contain shell metacharacters. *Note Special Chars in Variables::. This macro is one of the instantiating macros; see *note Configuration Actions::. Here is an unrealistic example: fubar=42 AC_CONFIG_COMMANDS([fubar], [echo this is extra $fubar, and so on.], [fubar=$fubar]) Here is a better one: AC_CONFIG_COMMANDS([timestamp], [date >timestamp]) The following two macros look similar, but in fact they are not of the same breed: they are executed directly by ‘configure’, so you cannot use ‘config.status’ to rerun them. -- Macro: AC_CONFIG_COMMANDS_PRE (CMDS) Execute the CMDS right before creating ‘config.status’.
This macro presents the last opportunity to call ‘AC_SUBST’, ‘AC_DEFINE’, or ‘AC_CONFIG_ITEMS’ macros. -- Macro: AC_CONFIG_COMMANDS_POST (CMDS) Execute the CMDS right after creating ‘config.status’.  File: autoconf.info, Node: Configuration Links, Next: Subdirectories, Prev: Configuration Commands, Up: Setup 4.11 Creating Configuration Links ================================= You may find it convenient to create links whose destinations depend upon results of tests. One can use ‘AC_CONFIG_COMMANDS’ but the creation of relative symbolic links can be delicate when the package is built in a directory different from the source directory. -- Macro: AC_CONFIG_LINKS (DEST:SOURCE..., [CMDS], [INIT-CMDS]) Make ‘AC_OUTPUT’ link each of the existing files SOURCE to the corresponding link name DEST. Makes a symbolic link if possible, otherwise a hard link if possible, otherwise a copy. The DEST and SOURCE names should be relative to the top level source or build directory, and should not contain shell metacharacters. *Note Special Chars in Variables::. This macro is one of the instantiating macros; see *note Configuration Actions::. For example, this call: AC_CONFIG_LINKS([host.h:config/$machine.h object.h:config/$obj_format.h]) creates in the current directory ‘host.h’ as a link to ‘SRCDIR/config/$machine.h’, and ‘object.h’ as a link to ‘SRCDIR/config/$obj_format.h’. The tempting value ‘.’ for DEST is invalid: it makes it impossible for ‘config.status’ to guess the links to establish. One can then run: ./config.status host.h object.h to create the links.  File: autoconf.info, Node: Subdirectories, Next: Default Prefix, Prev: Configuration Links, Up: Setup 4.12 Configuring Other Packages in Subdirectories ================================================= In most situations, calling ‘AC_OUTPUT’ is sufficient to produce makefiles in subdirectories. 
However, ‘configure’ scripts that control more than one independent package can use ‘AC_CONFIG_SUBDIRS’ to run ‘configure’ scripts for other packages in subdirectories. -- Macro: AC_CONFIG_SUBDIRS (DIR ...) Make ‘AC_OUTPUT’ run ‘configure’ in each subdirectory DIR in the given blank-or-newline-separated list. Each DIR should be a literal, i.e., please do not use: if test "x$package_foo_enabled" = xyes; then my_subdirs="$my_subdirs foo" fi AC_CONFIG_SUBDIRS([$my_subdirs]) because this prevents ‘./configure --help=recursive’ from displaying the options of the package ‘foo’. Instead, you should write: if test "x$package_foo_enabled" = xyes; then AC_CONFIG_SUBDIRS([foo]) fi If a given DIR is not found at ‘configure’ run time, a warning is reported; if the subdirectory is optional, write: if test -d "$srcdir/foo"; then AC_CONFIG_SUBDIRS([foo]) fi If a given DIR contains ‘configure.gnu’, it is run instead of ‘configure’. This is for packages that might use a non-Autoconf script ‘Configure’, which can’t be called through a wrapper ‘configure’ since it would be the same file on case-insensitive file systems. The subdirectory ‘configure’ scripts are given the same command line options that were given to this ‘configure’ script, with minor changes if needed, which include: − adjusting a relative name for the cache file; − adjusting a relative name for the source directory; − propagating the current value of ‘$prefix’, including if it was defaulted, and if the default values of the top level and of the subdirectory ‘configure’ differ. This macro also sets the output variable ‘subdirs’ to the list of directories ‘DIR ...’. Make rules can use this variable to determine which subdirectories to recurse into. This macro may be called multiple times.  File: autoconf.info, Node: Default Prefix, Prev: Subdirectories, Up: Setup 4.13 Default Prefix =================== By default, ‘configure’ sets the prefix for files it installs to ‘/usr/local’. 
The user of ‘configure’ can select a different prefix using the ‘--prefix’ and ‘--exec-prefix’ options. There are two ways to change the default: when creating ‘configure’, and when running it. Some software packages might want to install in a directory other than ‘/usr/local’ by default. To accomplish that, use the ‘AC_PREFIX_DEFAULT’ macro. -- Macro: AC_PREFIX_DEFAULT (PREFIX) Set the default installation prefix to PREFIX instead of ‘/usr/local’. It may be convenient for users to have ‘configure’ guess the installation prefix from the location of a related program that they have already installed. If you wish to do that, you can call ‘AC_PREFIX_PROGRAM’. -- Macro: AC_PREFIX_PROGRAM (PROGRAM) If the user did not specify an installation prefix (using the ‘--prefix’ option), guess a value for it by looking for PROGRAM in ‘PATH’, the way the shell does. If PROGRAM is found, set the prefix to the parent of the directory containing PROGRAM, else default the prefix as described above (‘/usr/local’ or ‘AC_PREFIX_DEFAULT’). For example, if PROGRAM is ‘gcc’ and the ‘PATH’ contains ‘/usr/local/gnu/bin/gcc’, set the prefix to ‘/usr/local/gnu’.  File: autoconf.info, Node: Existing Tests, Next: Writing Tests, Prev: Setup, Up: Top 5 Existing Tests **************** These macros test for particular system features that packages might need or want to use. If you need to test for a kind of feature that none of these macros check for, you can probably do it by calling primitive test macros with appropriate arguments (*note Writing Tests::). These tests print messages telling the user which feature they’re checking for, and what they find. They cache their results for future ‘configure’ runs (*note Caching Results::). Some of these macros set output variables. *Note Makefile Substitutions::, for how to get their values. The phrase “define NAME” is used below as a shorthand to mean “define the C preprocessor symbol NAME to the value 1”. 
*Note Defining Symbols::, for how to get those symbol definitions into your program. * Menu: * Common Behavior:: Macros’ standard schemes * Alternative Programs:: Selecting between alternative programs * Files:: Checking for the existence of files * Libraries:: Library archives that might be missing * Library Functions:: C library functions that might be missing * Header Files:: Header files that might be missing * Declarations:: Declarations that may be missing * Structures:: Structures or members that might be missing * Types:: Types that might be missing * Compilers and Preprocessors:: Checking for compiling programs * System Services:: Operating system services * C and Posix Variants:: Kludges for C and Posix variants * Erlang Libraries:: Checking for the existence of Erlang libraries  File: autoconf.info, Node: Common Behavior, Next: Alternative Programs, Up: Existing Tests 5.1 Common Behavior =================== Much effort has been expended to make Autoconf easy to learn. The most obvious way to reach this goal is simply to enforce standard interfaces and behaviors, avoiding exceptions as much as possible. Because of history and inertia, unfortunately, there are still too many exceptions in Autoconf; nevertheless, this section describes some of the common rules. * Menu: * Standard Symbols:: Symbols defined by the macros * Default Includes:: Includes used by the generic macros  File: autoconf.info, Node: Standard Symbols, Next: Default Includes, Up: Common Behavior 5.1.1 Standard Symbols ---------------------- All the generic macros that ‘AC_DEFINE’ a symbol as a result of their test transform their ARGUMENT values to a standard alphabet. First, ARGUMENT is converted to upper case and any asterisks (‘*’) are each converted to ‘P’. Any remaining characters that are not alphanumeric are converted to underscores. For instance, AC_CHECK_TYPES([struct $Expensive*]) defines the symbol ‘HAVE_STRUCT__EXPENSIVEP’ if the check succeeds.  
File: autoconf.info, Node: Default Includes, Prev: Standard Symbols, Up: Common Behavior 5.1.2 Default Includes ---------------------- Test programs frequently need to include headers that may or may not be available on the system whose features are being tested. Each test can use all the preprocessor macros that have been ‘AC_DEFINE’d by previous tests, so for example one may write #include <time.h> #ifdef HAVE_SYS_TIME_H # include <sys/time.h> #endif if ‘sys/time.h’ has already been tested for. All hosted environments that are still of interest for portable code provide all of the headers specified in ISO C90 (as amended in 1995): ‘assert.h’, ‘ctype.h’, ‘errno.h’, ‘float.h’, ‘iso646.h’, ‘limits.h’, ‘locale.h’, ‘math.h’, ‘setjmp.h’, ‘signal.h’, ‘stdarg.h’, ‘stddef.h’, ‘stdio.h’, ‘stdlib.h’, ‘string.h’, ‘time.h’, ‘wchar.h’, and ‘wctype.h’. Most programs can safely include these headers unconditionally. All other headers, including all headers from later revisions of the C standard, need to be tested for (*note Header Files::). If your program needs to be portable to a _freestanding_ environment, such as an embedded OS that doesn’t provide all of the facilities of the C90 standard library, you may need to test for some of the above headers as well. Note that many Autoconf macros internally assume that the complete set of C90 headers are available.
Most generic macros use the following macro to provide a default set of includes: -- Macro: AC_INCLUDES_DEFAULT ([INCLUDE-DIRECTIVES]) Expand to INCLUDE-DIRECTIVES if present and nonempty, otherwise to: #include <stddef.h> #ifdef HAVE_STDIO_H # include <stdio.h> #endif #ifdef HAVE_STDLIB_H # include <stdlib.h> #endif #ifdef HAVE_STRING_H # include <string.h> #endif #ifdef HAVE_INTTYPES_H # include <inttypes.h> #endif #ifdef HAVE_STDINT_H # include <stdint.h> #endif #ifdef HAVE_STRINGS_H # include <strings.h> #endif #ifdef HAVE_SYS_TYPES_H # include <sys/types.h> #endif #ifdef HAVE_SYS_STAT_H # include <sys/stat.h> #endif #ifdef HAVE_UNISTD_H # include <unistd.h> #endif Using this macro without INCLUDE-DIRECTIVES has the side effect of checking for ‘stdio.h’, ‘stdlib.h’, ‘string.h’, ‘inttypes.h’, ‘stdint.h’, ‘strings.h’, ‘sys/types.h’, ‘sys/stat.h’, and ‘unistd.h’, as if by ‘AC_CHECK_HEADERS_ONCE’. For backward compatibility, the macro ‘STDC_HEADERS’ will be defined when both ‘stdlib.h’ and ‘string.h’ are available. *Portability Note:* It is safe for most programs to assume the presence of all of the headers required by the original 1990 C standard. ‘AC_INCLUDES_DEFAULT’ checks for ‘stdio.h’, ‘stdlib.h’, and ‘string.h’, even though they are in that list, because they might not be available when compiling for a “freestanding environment” (in which most of the features of the C library are optional). You probably do not need to write ‘#ifdef HAVE_STDIO_H’ in your own code. ‘inttypes.h’ and ‘stdint.h’ were added to C in the 1999 revision of the standard, and ‘strings.h’, ‘sys/types.h’, ‘sys/stat.h’, and ‘unistd.h’ are POSIX extensions. You _should_ guard uses of these headers with appropriate conditionals. -- Macro: AC_CHECK_INCLUDES_DEFAULT Check for all the headers that ‘AC_INCLUDES_DEFAULT’ would check for as a side-effect, if this has not already happened. This macro mainly exists so that ‘autoupdate’ can replace certain obsolete constructs with it. You should not need to use it yourself; in fact, it is likely to be safe to delete it from any script in which it appears.
(‘autoupdate’ does not know whether preprocessor macros such as ‘HAVE_STDINT_H’ are used in the program, nor whether they would get defined as a side-effect of other checks.)  File: autoconf.info, Node: Alternative Programs, Next: Files, Prev: Common Behavior, Up: Existing Tests 5.2 Alternative Programs ======================== These macros check for the presence or behavior of particular programs. They are used to choose between several alternative programs and to decide what to do once one has been chosen. If there is no macro specifically defined to check for a program you need, and you don’t need to check for any special properties of it, then you can use one of the general program-check macros. * Menu: * Particular Programs:: Special handling to find certain programs * Generic Programs:: How to find other programs  File: autoconf.info, Node: Particular Programs, Next: Generic Programs, Up: Alternative Programs 5.2.1 Particular Program Checks ------------------------------- These macros check for particular programs—whether they exist, and in some cases whether they support certain features. -- Macro: AC_PROG_AWK Check for ‘gawk’, ‘mawk’, ‘nawk’, and ‘awk’, in that order, and set output variable ‘AWK’ to the first one that is found. It tries ‘gawk’ first because that is reported to be the best implementation. The result can be overridden by setting the variable ‘AWK’ or the cache variable ‘ac_cv_prog_AWK’. Using this macro is sufficient to avoid the pitfalls of traditional ‘awk’ (*note Limitations of Usual Tools: awk.). -- Macro: AC_PROG_GREP Look for the best available ‘grep’ or ‘ggrep’ that accepts the longest input lines possible, and that supports multiple ‘-e’ options. Set the output variable ‘GREP’ to whatever is chosen. *Note Limitations of Usual Tools: grep, for more information about portability problems with the ‘grep’ command family. The result can be overridden by setting the ‘GREP’ variable and is cached in the ‘ac_cv_path_GREP’ variable. 
-- Macro: AC_PROG_EGREP Check whether ‘$GREP -E’ works, or else look for the best available ‘egrep’ or ‘gegrep’ that accepts the longest input lines possible. Set the output variable ‘EGREP’ to whatever is chosen. The result can be overridden by setting the ‘EGREP’ variable and is cached in the ‘ac_cv_path_EGREP’ variable. -- Macro: AC_PROG_FGREP Check whether ‘$GREP -F’ works, or else look for the best available ‘fgrep’ or ‘gfgrep’ that accepts the longest input lines possible. Set the output variable ‘FGREP’ to whatever is chosen. The result can be overridden by setting the ‘FGREP’ variable and is cached in the ‘ac_cv_path_FGREP’ variable. -- Macro: AC_PROG_INSTALL Set output variable ‘INSTALL’ to the name of a BSD-compatible ‘install’ program, if one is found in the current ‘PATH’. Otherwise, set ‘INSTALL’ to ‘DIR/install-sh -c’, checking the directories specified to ‘AC_CONFIG_AUX_DIR’ (or its default directories) to determine DIR (*note Output::). Also set the variables ‘INSTALL_PROGRAM’ and ‘INSTALL_SCRIPT’ to ‘${INSTALL}’ and ‘INSTALL_DATA’ to ‘${INSTALL} -m 644’. ‘@INSTALL@’ is special, as its value may vary for different configuration files. This macro screens out various instances of ‘install’ known not to work. It prefers to find a C program rather than a shell script, for speed. Instead of ‘install-sh’, it can also use ‘install.sh’, but that name is obsolete because some ‘make’ programs have a rule that creates ‘install’ from it if there is no makefile. Further, this macro requires ‘install’ to be able to install multiple files into a target directory in a single invocation. Autoconf comes with a copy of ‘install-sh’ that you can use. If you use ‘AC_PROG_INSTALL’, you must include ‘install-sh’ in your distribution; otherwise ‘autoreconf’ and ‘configure’ will produce an error message saying they can’t find it—even if the system you’re on has a good ‘install’ program. 
This check is a safety measure to prevent you from accidentally leaving that file out, which would prevent your package from installing on systems that don’t have a BSD-compatible ‘install’ program. If you need to use your own installation program because it has features not found in standard ‘install’ programs, there is no reason to use ‘AC_PROG_INSTALL’; just put the file name of your program into your ‘Makefile.in’ files. The result of the test can be overridden by setting the variable ‘INSTALL’ or the cache variable ‘ac_cv_path_install’. -- Macro: AC_PROG_MKDIR_P Set output variable ‘MKDIR_P’ to a program that ensures that for each argument, a directory named by this argument exists, creating it and its parent directories if needed, and without race conditions when two instances of the program attempt to make the same directory at nearly the same time. This macro uses the ‘mkdir -p’ command if possible. Otherwise, it falls back on invoking ‘install-sh’ with the ‘-d’ option, so your package should contain ‘install-sh’ as described under ‘AC_PROG_INSTALL’. An ‘install-sh’ file that predates Autoconf 2.60 or Automake 1.10 is vulnerable to race conditions, so if you want to support parallel installs from different packages into the same directory you need to make sure you have an up-to-date ‘install-sh’. In particular, be careful about using ‘autoreconf -if’ if your Automake predates Automake 1.10. This macro is related to the ‘AS_MKDIR_P’ macro (*note Programming in M4sh::), but it sets an output variable intended for use in other files, whereas ‘AS_MKDIR_P’ is intended for use in scripts like ‘configure’. Also, ‘AS_MKDIR_P’ does not accept options, but ‘MKDIR_P’ supports the ‘-m’ option, e.g., a makefile might invoke ‘$(MKDIR_P) -m 0 dir’ to create an inaccessible directory, and conversely a makefile should use ‘$(MKDIR_P) -- $(FOO)’ if FOO might yield a value that begins with ‘-’. 
Finally, ‘AS_MKDIR_P’ does not check for race condition vulnerability, whereas ‘AC_PROG_MKDIR_P’ does. ‘@MKDIR_P@’ is special, as its value may vary for different configuration files. The result of the test can be overridden by setting the variable ‘MKDIR_P’ or the cache variable ‘ac_cv_path_mkdir’. -- Macro: AC_PROG_LEX (OPTIONS) Search for a lexical analyzer generator, preferring ‘flex’ to plain ‘lex’. Output variable ‘LEX’ is set to whichever program is available. If neither program is available, ‘LEX’ is set to ‘:’; for packages that ship the generated ‘file.yy.c’ alongside the source ‘file.l’, this default allows users without a lexer generator to still build the package even if the timestamp for ‘file.l’ is inadvertently changed. The name of the program to use can be overridden by setting the output variable ‘LEX’ or the cache variable ‘ac_cv_prog_LEX’ when running ‘configure’. If a lexical analyzer generator is found, this macro performs additional checks for common portability pitfalls. If these additional checks fail, ‘LEX’ is reset to ‘:’; otherwise the following additional macros and variables are provided. Preprocessor macro ‘YYTEXT_POINTER’ is defined if the lexer skeleton, by default, declares ‘yytext’ as a ‘char *’ rather than a ‘char []’. Output variable ‘LEX_OUTPUT_ROOT’ is set to the base of the file name that the lexer generates; this is usually either ‘lex.yy’ or ‘lexyy’. If generated lexers need a library to work, output variable ‘LEXLIB’ is set to a link option for that library (e.g., ‘-ll’), otherwise it is set to empty. The OPTIONS argument modifies the behavior of ‘AC_PROG_LEX’. It should be a whitespace-separated list of options. Currently there are only two options, and they are mutually exclusive: ‘yywrap’ Indicate that the library in ‘LEXLIB’ needs to define the function ‘yywrap’. If a library that defines this function cannot be found, ‘LEX’ will be reset to ‘:’. 
‘noyywrap’ Indicate that the library in ‘LEXLIB’ does not need to define the function ‘yywrap’. ‘configure’ will not search for it at all. Prior to Autoconf 2.70, ‘AC_PROG_LEX’ did not take any arguments, and its behavior was different from either of the above possibilities: it would search for a library that defines ‘yywrap’, and would set ‘LEXLIB’ to that library if it finds one. However, if a library that defines this function could not be found, ‘LEXLIB’ would be left empty and ‘LEX’ would _not_ be reset. This behavior was due to a bug, but several packages came to depend on it, so ‘AC_PROG_LEX’ still does this if neither the ‘yywrap’ nor the ‘noyywrap’ option is given. Usage of ‘AC_PROG_LEX’ without choosing one of the ‘yywrap’ or ‘noyywrap’ options is deprecated. It is usually better to use ‘noyywrap’ and define the ‘yywrap’ function yourself, as this almost always renders the ‘LEXLIB’ unnecessary. *Caution:* As a side-effect of the test, this macro may delete any file in the configure script’s current working directory named ‘lex.yy.c’ or ‘lexyy.c’. *Caution:* Packages that ship a generated ‘lex.yy.c’ cannot assume that the definition of ‘YYTEXT_POINTER’ matches the code in that file. They also cannot assume that ‘LEXLIB’ provides the library routines required by the code in that file. If you use Flex to generate ‘lex.yy.c’, you can work around these limitations by defining ‘yywrap’ and ‘main’ yourself (rendering ‘-lfl’ unnecessary), and by using either the ‘--array’ or ‘--pointer’ options to control how ‘yytext’ is declared. The code generated by Flex is also more portable than the code generated by historical versions of Lex. 
If you have used Flex to generate ‘lex.yy.c’, and especially if your scanner depends on Flex features, we recommend you use this Autoconf snippet to prevent the scanner being regenerated with historical Lex: AC_PROG_LEX if test "x$LEX" != xflex; then LEX="$SHELL $missing_dir/missing flex" AC_SUBST([LEX_OUTPUT_ROOT], [lex.yy]) AC_SUBST([LEXLIB], ['']) fi The shell script ‘missing’ can be found in the Automake distribution. Remember that the user may have supplied an alternate location in ‘LEX’, so if Flex is required, it is better to check that the user provided something sufficient by parsing the output of ‘$LEX --version’ than by simply relying on ‘test "x$LEX" = xflex’. -- Macro: AC_PROG_LN_S If ‘ln -s’ works on the current file system (the operating system and file system support symbolic links), set the output variable ‘LN_S’ to ‘ln -s’; otherwise, if ‘ln’ works, set ‘LN_S’ to ‘ln’, and otherwise set it to ‘cp -pR’. If you make a link in a directory other than the current directory, its meaning depends on whether ‘ln’ or ‘ln -s’ is used. To safely create links using ‘$(LN_S)’, either find out which form is used and adjust the arguments, or always invoke ‘ln’ in the directory where the link is to be created. In other words, it does not work to do: $(LN_S) foo /x/bar Instead, do: (cd /x && $(LN_S) foo bar) -- Macro: AC_PROG_RANLIB Set output variable ‘RANLIB’ to ‘ranlib’ if ‘ranlib’ is found, and otherwise to ‘:’ (do nothing). -- Macro: AC_PROG_SED Set output variable ‘SED’ to a Sed implementation that conforms to Posix and does not have arbitrary length limits. Report an error if no acceptable Sed is found. *Note Limitations of Usual Tools: sed, for more information about portability problems with Sed. The result of this test can be overridden by setting the ‘SED’ variable and is cached in the ‘ac_cv_path_SED’ variable. -- Macro: AC_PROG_YACC If ‘bison’ is found, set output variable ‘YACC’ to ‘bison -y’. Otherwise, if ‘byacc’ is found, set ‘YACC’ to ‘byacc’. 
Otherwise set ‘YACC’ to ‘yacc’. The result of this test can be influenced by setting the variable ‘YACC’ or the cache variable ‘ac_cv_prog_YACC’.  File: autoconf.info, Node: Generic Programs, Prev: Particular Programs, Up: Alternative Programs 5.2.2 Generic Program and File Checks ------------------------------------- These macros are used to find programs not covered by the “particular” test macros. If you need to check the behavior of a program as well as find out whether it is present, you have to write your own test for it (*note Writing Tests::). By default, these macros use the environment variable ‘PATH’. If you need to check for a program that might not be in the user’s ‘PATH’, you can pass a modified path to use instead, like this: AC_PATH_PROG([INETD], [inetd], [/usr/libexec/inetd], [$PATH$PATH_SEPARATOR/usr/libexec$PATH_SEPARATOR]dnl [/usr/sbin$PATH_SEPARATOR/usr/etc$PATH_SEPARATOR/etc]) You are strongly encouraged to declare the VARIABLE passed to ‘AC_CHECK_PROG’ etc. as precious. *Note Setting Output Variables::, ‘AC_ARG_VAR’, for more details. -- Macro: AC_CHECK_PROG (VARIABLE, PROG-TO-CHECK-FOR, VALUE-IF-FOUND, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’], [REJECT]) Check whether program PROG-TO-CHECK-FOR exists in PATH. If it is found, set VARIABLE to VALUE-IF-FOUND, otherwise to VALUE-IF-NOT-FOUND, if given. Always pass over REJECT (an absolute file name) even if it is the first found in the search path; in that case, set VARIABLE using the absolute file name of the PROG-TO-CHECK-FOR found that is not REJECT. If VARIABLE was already set, do nothing. Calls ‘AC_SUBST’ for VARIABLE. The result of this test can be overridden by setting the VARIABLE variable or the cache variable ‘ac_cv_prog_VARIABLE’. -- Macro: AC_CHECK_PROGS (VARIABLE, PROGS-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Check for each program in the blank-separated list PROGS-TO-CHECK-FOR existing in the PATH. If one is found, set VARIABLE to the name of that program. 
Otherwise, continue checking the next program in the list. If none of the programs in the list are found, set VARIABLE to VALUE-IF-NOT-FOUND; if VALUE-IF-NOT-FOUND is not specified, the value of VARIABLE is not changed. Calls ‘AC_SUBST’ for VARIABLE. The result of this test can be overridden by setting the VARIABLE variable or the cache variable ‘ac_cv_prog_VARIABLE’. -- Macro: AC_CHECK_TARGET_TOOL (VARIABLE, PROG-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_PROG’, but first looks for PROG-TO-CHECK-FOR with a prefix of the target type as determined by ‘AC_CANONICAL_TARGET’, followed by a dash (*note Canonicalizing::). If the tool cannot be found with a prefix, and if the build and target types are equal, then it is also searched for without a prefix. As noted in *note Specifying Target Triplets::, the target is rarely specified, because most of the time it is the same as the host: it is the type of system for which any compiler tool in the package produces code. What this macro looks for is, for example, _a tool (assembler, linker, etc.) that the compiler driver (‘gcc’ for the GNU C Compiler) uses to produce objects, archives or executables_. -- Macro: AC_CHECK_TOOL (VARIABLE, PROG-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_PROG’, but first looks for PROG-TO-CHECK-FOR with a prefix of the host type as specified by ‘--host’, followed by a dash. For example, if the user runs ‘configure --build=x86_64-gnu --host=aarch64-linux-gnu’, then this call: AC_CHECK_TOOL([RANLIB], [ranlib], [:]) sets ‘RANLIB’ to ‘aarch64-linux-gnu-ranlib’ if that program exists in PATH, or otherwise to ‘ranlib’ if that program exists in PATH, or to ‘:’ if neither program exists. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see *note Specifying Target Triplets::. 
-- Macro: AC_CHECK_TARGET_TOOLS (VARIABLE, PROGS-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_TARGET_TOOL’, each of the tools in the list PROGS-TO-CHECK-FOR are checked with a prefix of the target type as determined by ‘AC_CANONICAL_TARGET’, followed by a dash (*note Canonicalizing::). If none of the tools can be found with a prefix, and if the build and target types are equal, then the first one without a prefix is used. If a tool is found, set VARIABLE to the name of that program. If none of the tools in the list are found, set VARIABLE to VALUE-IF-NOT-FOUND; if VALUE-IF-NOT-FOUND is not specified, the value of VARIABLE is not changed. Calls ‘AC_SUBST’ for VARIABLE. -- Macro: AC_CHECK_TOOLS (VARIABLE, PROGS-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_TOOL’, each of the tools in the list PROGS-TO-CHECK-FOR are checked with a prefix of the host type as determined by ‘AC_CANONICAL_HOST’, followed by a dash (*note Canonicalizing::). If none of the tools can be found with a prefix, then the first one without a prefix is used. If a tool is found, set VARIABLE to the name of that program. If none of the tools in the list are found, set VARIABLE to VALUE-IF-NOT-FOUND; if VALUE-IF-NOT-FOUND is not specified, the value of VARIABLE is not changed. Calls ‘AC_SUBST’ for VARIABLE. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see *note Specifying Target Triplets::. -- Macro: AC_PATH_PROG (VARIABLE, PROG-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_PROG’, but set VARIABLE to the absolute name of PROG-TO-CHECK-FOR if found. The result of this test can be overridden by setting the VARIABLE variable. A positive result of this test is cached in the ‘ac_cv_path_VARIABLE’ variable. 
-- Macro: AC_PATH_PROGS (VARIABLE, PROGS-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_PROGS’, but if any of PROGS-TO-CHECK-FOR are found, set VARIABLE to the absolute name of the program found. The result of this test can be overridden by setting the VARIABLE variable. A positive result of this test is cached in the ‘ac_cv_path_VARIABLE’ variable. -- Macro: AC_PATH_PROGS_FEATURE_CHECK (VARIABLE, PROGS-TO-CHECK-FOR, FEATURE-TEST, [ACTION-IF-NOT-FOUND], [PATH = ‘$PATH’]) This macro was introduced in Autoconf 2.62. If VARIABLE is not empty, then set the cache variable ‘ac_cv_path_VARIABLE’ to its value. Otherwise, check for each program in the blank-separated list PROGS-TO-CHECK-FOR existing in PATH. For each program found, execute FEATURE-TEST with ‘ac_path_VARIABLE’ set to the absolute name of the candidate program. If no invocation of FEATURE-TEST sets the shell variable ‘ac_cv_path_VARIABLE’, then ACTION-IF-NOT-FOUND is executed. FEATURE-TEST will be run even when ‘ac_cv_path_VARIABLE’ is set, to provide the ability to choose a better candidate found later in PATH; to accept the current setting and bypass all further checks, FEATURE-TEST can execute ‘ac_path_VARIABLE_found=:’. Note that this macro has some subtle differences from ‘AC_CHECK_PROGS’. It is designed to be run inside ‘AC_CACHE_VAL’, therefore, it should have no side effects. In particular, VARIABLE is not set to the final value of ‘ac_cv_path_VARIABLE’, nor is ‘AC_SUBST’ automatically run. Also, on failure, any action can be performed, whereas ‘AC_CHECK_PROGS’ only performs ‘VARIABLE=VALUE-IF-NOT-FOUND’. Here is an example, similar to what Autoconf uses in its own configure script. It will search for an implementation of ‘m4’ that supports the ‘indir’ builtin, even if it goes by the name ‘gm4’ or is not the first implementation on ‘PATH’. 
AC_CACHE_CHECK([for m4 that supports indir], [ac_cv_path_M4], [AC_PATH_PROGS_FEATURE_CHECK([M4], [m4 gm4], [[m4out=`echo 'changequote([,])indir([divnum])' | $ac_path_M4` test "x$m4out" = x0 \ && ac_cv_path_M4=$ac_path_M4 ac_path_M4_found=:]], [AC_MSG_ERROR([could not find m4 that supports indir])])]) AC_SUBST([M4], [$ac_cv_path_M4]) -- Macro: AC_PATH_TARGET_TOOL (VARIABLE, PROG-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_TARGET_TOOL’, but set VARIABLE to the absolute name of the program if it is found. -- Macro: AC_PATH_TOOL (VARIABLE, PROG-TO-CHECK-FOR, [VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Like ‘AC_CHECK_TOOL’, but set VARIABLE to the absolute name of the program if it is found. When cross-compiling, this macro will issue a warning if no program prefixed with the host type could be found. For more information, see *note Specifying Target Triplets::.  File: autoconf.info, Node: Files, Next: Libraries, Prev: Alternative Programs, Up: Existing Tests 5.3 Files ========= You might also need to check for the existence of files. Before using these macros, ask yourself whether a runtime test might not be a better solution. Be aware that, like most Autoconf macros, they test a feature of the host machine, and therefore, they die when cross-compiling. -- Macro: AC_CHECK_FILE (FILE, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) Check whether file FILE exists on the native system. If it is found, execute ACTION-IF-FOUND, otherwise do ACTION-IF-NOT-FOUND, if given. Cache the result of this test in the ‘ac_cv_file_FILE’ variable, with characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_FILES (FILES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) For each file listed in FILES, execute ‘AC_CHECK_FILE’ and perform either ACTION-IF-FOUND or ACTION-IF-NOT-FOUND. 
Like ‘AC_CHECK_FILE’, this defines ‘HAVE_FILE’ (*note Standard Symbols::) for each file found and caches the results of each test in the ‘ac_cv_file_FILE’ variable, with characters not suitable for a variable name mapped to underscores.  File: autoconf.info, Node: Libraries, Next: Library Functions, Prev: Files, Up: Existing Tests 5.4 Library Files ================= The following macros check for the presence of certain C, C++, Fortran, or Go library archive files. -- Macro: AC_CHECK_LIB (LIBRARY, FUNCTION, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [OTHER-LIBRARIES]) Test whether the library LIBRARY is available by trying to link a test program that calls function FUNCTION with the library. FUNCTION should be a function provided by the library. Use the base name of the library; e.g., to check for ‘-lmp’, use ‘mp’ as the LIBRARY argument. ACTION-IF-FOUND is a list of shell commands to run if the link with the library succeeds; ACTION-IF-NOT-FOUND is a list of shell commands to run if the link fails. If ACTION-IF-FOUND is not specified, the default action prepends ‘-lLIBRARY’ to ‘LIBS’ and defines ‘HAVE_LIBLIBRARY’ (in all capitals). This macro is intended to support building ‘LIBS’ in a right-to-left (least-dependent to most-dependent) fashion such that library dependencies are satisfied as a natural side effect of consecutive tests. Linkers are sensitive to library ordering so the order in which ‘LIBS’ is generated is important to reliable detection of libraries. If linking with LIBRARY results in unresolved symbols that would be resolved by linking with additional libraries, give those libraries as the OTHER-LIBRARIES argument, separated by spaces: e.g., ‘-lXt -lX11’. Otherwise, this macro may fail to detect that LIBRARY is present, because linking the test program can fail with unresolved symbols. The OTHER-LIBRARIES argument should be limited to cases where it is desirable to test for one library in the presence of another that is not already in ‘LIBS’. 
‘AC_CHECK_LIB’ requires some care in usage, and should be avoided in some common cases. Many standard functions like ‘gethostbyname’ appear in the standard C library on some hosts, and in special libraries like ‘nsl’ on other hosts. On some hosts the special libraries contain variant implementations that you may not want to use. These days it is normally better to use ‘AC_SEARCH_LIBS([gethostbyname], [nsl])’ instead of ‘AC_CHECK_LIB([nsl], [gethostbyname])’. The result of this test is cached in the ‘ac_cv_lib_LIBRARY_FUNCTION’ variable. -- Macro: AC_SEARCH_LIBS (FUNCTION, SEARCH-LIBS, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [OTHER-LIBRARIES]) Search for a library defining FUNCTION if it’s not already available. This equates to calling ‘AC_LINK_IFELSE([AC_LANG_CALL([], [FUNCTION])])’ first with no libraries, then for each library listed in SEARCH-LIBS. Prepend ‘-lLIBRARY’ to ‘LIBS’ for the first library found to contain FUNCTION, and run ACTION-IF-FOUND. If the function is not found, run ACTION-IF-NOT-FOUND. If linking with LIBRARY results in unresolved symbols that would be resolved by linking with additional libraries, give those libraries as the OTHER-LIBRARIES argument, separated by spaces: e.g., ‘-lXt -lX11’. Otherwise, this macro fails to detect that FUNCTION is present, because linking the test program always fails with unresolved symbols. The result of this test is cached in the ‘ac_cv_search_FUNCTION’ variable as ‘none required’ if FUNCTION is already available, as ‘no’ if no library containing FUNCTION was found, otherwise as the ‘-lLIBRARY’ option that needs to be prepended to ‘LIBS’.  File: autoconf.info, Node: Library Functions, Next: Header Files, Prev: Libraries, Up: Existing Tests 5.5 Library Functions ===================== The following macros check for particular C library functions. 
If there is no macro specifically defined to check for a function you need, and you don’t need to check for any special properties of it, then you can use one of the general function-check macros. * Menu: * Function Portability:: Pitfalls with usual functions * Particular Functions:: Special handling to find certain functions * Generic Functions:: How to find other functions  File: autoconf.info, Node: Function Portability, Next: Particular Functions, Up: Library Functions 5.5.1 Portability of C Functions -------------------------------- Most usual functions can either be missing, or be buggy, or be limited on some architectures. This section tries to make an inventory of these portability issues. By definition, this list always requires additions. A much more complete list is maintained by the Gnulib project (*note Gnulib::), covering *note Current Posix Functions: (gnulib)Function Substitutes, *note Legacy Functions: (gnulib)Legacy Function Substitutes, and *note Glibc Functions: (gnulib)Glibc Function Substitutes. Please help us keep the Gnulib list as complete as possible. ‘exit’ On ancient hosts, ‘exit’ returned ‘int’. This is because ‘exit’ predates ‘void’, and there was a long tradition of it returning ‘int’. On current hosts, the problem more likely is that ‘exit’ is not declared, due to C++ problems of some sort or another. For this reason we suggest that test programs not invoke ‘exit’, but return from ‘main’ instead. ‘free’ The C standard says a call ‘free (NULL)’ does nothing, but some old systems don’t support this (e.g., NextStep). ‘isinf’ ‘isnan’ In C99 and later, ‘isinf’ and ‘isnan’ are macros. On some systems just macros are available (e.g., HP-UX and Solaris 10), on some systems both macros and functions (e.g., glibc 2.3.2), and on some systems only functions (e.g., IRIX 6 and Solaris 9). In some cases these functions are declared in nonstandard headers like ‘<sunmath.h>’ and defined in non-default libraries like ‘-lm’ or ‘-lsunmath’. 
In C99 and later, ‘isinf’ and ‘isnan’ macros work correctly with ‘long double’ arguments, but pre-C99 systems that use functions typically assume ‘double’ arguments. On such a system, ‘isinf’ incorrectly returns true for a finite ‘long double’ argument that is outside the range of ‘double’. The best workaround for these issues is to use Gnulib modules ‘isinf’ and ‘isnan’ (*note Gnulib::). But a lighter weight solution involves code like the following. #include <math.h> #ifndef isnan # define isnan(x) \ (sizeof (x) == sizeof (long double) ? isnan_ld (x) \ : sizeof (x) == sizeof (double) ? isnan_d (x) \ : isnan_f (x)) static int isnan_f (float x) { return x != x; } static int isnan_d (double x) { return x != x; } static int isnan_ld (long double x) { return x != x; } #endif #ifndef isinf # define isinf(x) \ (sizeof (x) == sizeof (long double) ? isinf_ld (x) \ : sizeof (x) == sizeof (double) ? isinf_d (x) \ : isinf_f (x)) static int isinf_f (float x) { return !isnan (x) && isnan (x - x); } static int isinf_d (double x) { return !isnan (x) && isnan (x - x); } static int isinf_ld (long double x) { return !isnan (x) && isnan (x - x); } #endif Some optimizing compilers mishandle these definitions, but systems with that bug typically have many other floating point corner-case compliance problems anyway, so it’s probably not worth worrying about. ‘malloc’ The C standard says a call ‘malloc (0)’ is implementation dependent. It can return either ‘NULL’ or a new non-null pointer. The latter is more common (e.g., the GNU C Library) but is by no means universal. ‘AC_FUNC_MALLOC’ can be used to insist on non-‘NULL’ (*note Particular Functions::). ‘putenv’ Posix prefers ‘setenv’ to ‘putenv’; among other things, ‘putenv’ is not required of all Posix implementations, but ‘setenv’ is. Posix specifies that ‘putenv’ puts the given string directly in ‘environ’, but some systems make a copy of it instead (e.g., glibc 2.0, or BSD). 
And when a copy is made, ‘unsetenv’ might not free it, causing a memory leak (e.g., FreeBSD 4). On some systems ‘putenv ("FOO")’ removes ‘FOO’ from the environment, but this is not standard usage and it dumps core on some systems (e.g., AIX). On MinGW, a call ‘putenv ("FOO=")’ removes ‘FOO’ from the environment, rather than inserting it with an empty value. ‘realloc’ The C standard says a call ‘realloc (NULL, size)’ is equivalent to ‘malloc (size)’, but some old systems don’t support this (e.g., NextStep). ‘signal’ handler Normally ‘signal’ takes a handler function with a return type of ‘void’, but some old systems required ‘int’ instead. Any actual ‘int’ value returned is not used; this is only a difference in the function prototype demanded. All systems we know of in current use return ‘void’. The ‘int’ was to support K&R C, where of course ‘void’ is not available. The obsolete macro ‘AC_TYPE_SIGNAL’ (*note AC_TYPE_SIGNAL::) can be used to establish the correct type in all cases. In most cases, it is more robust to use ‘sigaction’ when it is available, rather than ‘signal’. ‘snprintf’ In C99 and later, if the output array isn’t big enough and if no other errors occur, ‘snprintf’ and ‘vsnprintf’ truncate the output and return the number of bytes that ought to have been produced. Some older systems return the truncated length (e.g., GNU C Library 2.0.x or IRIX 6.5), some a negative value (e.g., earlier GNU C Library versions), and some the buffer length without truncation (e.g., 32-bit Solaris 7). Also, some buggy older systems ignore the length and overrun the buffer (e.g., 64-bit Solaris 7). ‘sprintf’ The C standard says ‘sprintf’ and ‘vsprintf’ return the number of bytes written. On some ancient systems (SunOS 4 for instance) they return the buffer pointer instead, but these no longer need to be worried about. ‘sscanf’ On various old systems, e.g., HP-UX 9, ‘sscanf’ requires that its input string be writable (though it doesn’t actually change it). 
This can be a problem when using ‘gcc’ since it normally puts constant strings in read-only memory (*note Incompatibilities of GCC: (gcc)Incompatibilities.). Apparently in some cases even having format strings read-only can be a problem. ‘strerror_r’ Posix specifies that ‘strerror_r’ returns an ‘int’, but many systems (e.g., GNU C Library version 2.2.4) provide a different version returning a ‘char *’. ‘AC_FUNC_STRERROR_R’ can detect which is in use (*note Particular Functions::). ‘strnlen’ AIX 4.3 provides a broken version which produces the following results: strnlen ("foobar", 0) = 0 strnlen ("foobar", 1) = 3 strnlen ("foobar", 2) = 2 strnlen ("foobar", 3) = 1 strnlen ("foobar", 4) = 0 strnlen ("foobar", 5) = 6 strnlen ("foobar", 6) = 6 strnlen ("foobar", 7) = 6 strnlen ("foobar", 8) = 6 strnlen ("foobar", 9) = 6 ‘sysconf’ ‘_SC_PAGESIZE’ is standard, but some older systems (e.g., HP-UX 9) have ‘_SC_PAGE_SIZE’ instead. This can be tested with ‘#ifdef’. ‘unlink’ The Posix spec says that ‘unlink’ causes the given file to be removed only after there are no more open file handles for it. Some non-Posix hosts have trouble with this requirement, though, and some DOS variants even corrupt the file system. ‘unsetenv’ On MinGW, ‘unsetenv’ is not available, but a variable ‘FOO’ can be removed with a call ‘putenv ("FOO=")’, as described under ‘putenv’ above. ‘va_copy’ C99 and later provide ‘va_copy’ for copying ‘va_list’ variables. It may be available in older environments too, though possibly as ‘__va_copy’ (e.g., ‘gcc’ in strict pre-C99 mode). These can be tested with ‘#ifdef’. A fallback to ‘memcpy (&dst, &src, sizeof (va_list))’ gives maximum portability. ‘va_list’ ‘va_list’ is not necessarily just a pointer. It can be a ‘struct’ (e.g., ‘gcc’ on Alpha), which means ‘NULL’ is not portable. 
Or it can be an array (e.g., ‘gcc’ in some PowerPC configurations), which means as a function parameter it can be effectively call-by-reference and library routines might modify the value back in the caller (e.g., ‘vsnprintf’ in the GNU C Library 2.1). Signed ‘>>’ Normally the C ‘>>’ right shift of a signed type replicates the high bit, giving a so-called “arithmetic” shift. But care should be taken since Standard C doesn’t require that behavior. On those few processors without a native arithmetic shift (for instance Cray vector systems) zero bits may be shifted in, the same as a shift of an unsigned type. Integer ‘/’ C divides signed integers by truncating their quotient toward zero, yielding the same result as Fortran. However, before C99 the standard allowed C implementations to take the floor or ceiling of the quotient in some cases. Hardly any implementations took advantage of this freedom, though, and it’s probably not worth worrying about this issue nowadays.  File: autoconf.info, Node: Particular Functions, Next: Generic Functions, Prev: Function Portability, Up: Library Functions 5.5.2 Particular Function Checks -------------------------------- These macros check for particular C functions—whether they exist, and in some cases how they respond when given certain arguments. -- Macro: AC_FUNC_ALLOCA Check for the ‘alloca’ function. Define ‘HAVE_ALLOCA_H’ if ‘alloca.h’ defines a working ‘alloca’. If not, look for a builtin alternative. If either method succeeds, define ‘HAVE_ALLOCA’. Otherwise, set the output variable ‘ALLOCA’ to ‘${LIBOBJDIR}alloca.o’ and define ‘C_ALLOCA’ (so programs can periodically call ‘alloca (0)’ to garbage collect). This variable is separate from ‘LIBOBJS’ so multiple programs can share the value of ‘ALLOCA’ without needing to create an actual library, in case only some of them use the code in ‘LIBOBJS’. The ‘${LIBOBJDIR}’ prefix serves the same purpose as in ‘LIBOBJS’ (*note AC_LIBOBJ vs LIBOBJS::). 
Source files that use ‘alloca’ should start with a piece of code like the following, to declare it properly. #include <stdlib.h> #include <stddef.h> #ifdef HAVE_ALLOCA_H # include <alloca.h> #elif !defined alloca # ifdef __GNUC__ # define alloca __builtin_alloca # elif defined _MSC_VER # include <malloc.h> # define alloca _alloca # elif !defined HAVE_ALLOCA # ifdef __cplusplus extern "C" # endif void *alloca (size_t); # endif #endif If you don’t want to maintain this piece of code in your package manually, you can instead use the Gnulib module ‘alloca-opt’ or ‘alloca’. *Note Gnulib::. -- Macro: AC_FUNC_CHOWN If the ‘chown’ function is available and works (in particular, it should accept ‘-1’ for ‘uid’ and ‘gid’), define ‘HAVE_CHOWN’. The result of this macro is cached in the ‘ac_cv_func_chown_works’ variable. If you want a workaround, that is, a ‘chown’ function that is available and works, you can use the Gnulib module ‘chown’. *Note Gnulib::. -- Macro: AC_FUNC_CLOSEDIR_VOID If the ‘closedir’ function does not return a meaningful value, define ‘CLOSEDIR_VOID’. Otherwise, callers ought to check its return value for an error indicator. Currently this test is implemented by running a test program. When cross compiling the pessimistic assumption that ‘closedir’ does not return a meaningful value is made. The result of this macro is cached in the ‘ac_cv_func_closedir_void’ variable. This macro is obsolescent, as ‘closedir’ returns a meaningful value on current systems. New programs need not use this macro. -- Macro: AC_FUNC_ERROR_AT_LINE If the ‘error_at_line’ function is not found, require an ‘AC_LIBOBJ’ replacement of ‘error’. The result of this macro is cached in the ‘ac_cv_lib_error_at_line’ variable. The ‘AC_FUNC_ERROR_AT_LINE’ macro is obsolescent. New programs should use Gnulib’s ‘error’ module. *Note Gnulib::. -- Macro: AC_FUNC_FNMATCH If the ‘fnmatch’ function conforms to Posix, define ‘HAVE_FNMATCH’. Detect common implementation bugs, for example, the bugs in Solaris 2.4. 
Unlike the other specific ‘AC_FUNC’ macros, ‘AC_FUNC_FNMATCH’ does not replace a broken/missing ‘fnmatch’. This is for historical reasons. See ‘AC_REPLACE_FNMATCH’ below. The result of this macro is cached in the ‘ac_cv_func_fnmatch_works’ variable. This macro is obsolescent. New programs should use Gnulib’s ‘fnmatch-posix’ module. *Note Gnulib::. -- Macro: AC_FUNC_FNMATCH_GNU Behave like ‘AC_REPLACE_FNMATCH’ (_replace_) but also test whether ‘fnmatch’ supports GNU extensions. Detect common implementation bugs, for example, the bugs in the GNU C Library 2.1. The result of this macro is cached in the ‘ac_cv_func_fnmatch_gnu’ variable. This macro is obsolescent. New programs should use Gnulib’s ‘fnmatch-gnu’ module. *Note Gnulib::. -- Macro: AC_FUNC_FORK This macro checks for the ‘fork’ and ‘vfork’ functions. If a working ‘fork’ is found, define ‘HAVE_WORKING_FORK’. This macro checks whether ‘fork’ is just a stub by trying to run it. If ‘vfork.h’ is found, define ‘HAVE_VFORK_H’. If a working ‘vfork’ is found, define ‘HAVE_WORKING_VFORK’. Otherwise, define ‘vfork’ to be ‘fork’ for backward compatibility with previous versions of ‘autoconf’. This macro checks for several known errors in implementations of ‘vfork’ and considers the system to not have a working ‘vfork’ if it detects any of them. Since this macro defines ‘vfork’ only for backward compatibility with previous versions of ‘autoconf’ you’re encouraged to define it yourself in new code: #ifndef HAVE_WORKING_VFORK # define vfork fork #endif The results of this macro are cached in the ‘ac_cv_func_fork_works’ and ‘ac_cv_func_vfork_works’ variables. In order to override the test, you also need to set the ‘ac_cv_func_fork’ and ‘ac_cv_func_vfork’ variables. -- Macro: AC_FUNC_FSEEKO If the ‘fseeko’ function is available, define ‘HAVE_FSEEKO’. Define ‘_LARGEFILE_SOURCE’ if necessary to make the prototype visible on some systems (e.g., glibc 2.2). 
Otherwise linkage problems may occur when compiling with ‘AC_SYS_LARGEFILE’ on largefile-sensitive systems where ‘off_t’ does not default to a 64bit entity. All systems with ‘fseeko’ also supply ‘ftello’. The Gnulib module ‘fseeko’ invokes ‘AC_FUNC_FSEEKO’ and also contains workarounds for other portability problems of ‘fseeko’. *Note Gnulib::. -- Macro: AC_FUNC_GETGROUPS If the ‘getgroups’ function is available and works (unlike on Ultrix 4.3 and NeXTstep 3.2, where ‘getgroups (0, 0)’ always fails), define ‘HAVE_GETGROUPS’. Set ‘GETGROUPS_LIBS’ to any libraries needed to get that function. This macro runs ‘AC_TYPE_GETGROUPS’. This macro is obsolescent. New programs need not use this macro. But they may want to use the Gnulib module ‘getgroups’, which provides workarounds to other portability problems of this function. -- Macro: AC_FUNC_GETLOADAVG Check how to get the system load averages. To perform its tests properly, this macro needs the file ‘getloadavg.c’; therefore, be sure to set the ‘AC_LIBOBJ’ replacement directory properly (see *note Generic Functions::, ‘AC_CONFIG_LIBOBJ_DIR’). If the system has the ‘getloadavg’ function, define ‘HAVE_GETLOADAVG’, and set ‘GETLOADAVG_LIBS’ to any libraries necessary to get that function. Also add ‘GETLOADAVG_LIBS’ to ‘LIBS’. Otherwise, require an ‘AC_LIBOBJ’ replacement for ‘getloadavg’ and possibly define several other C preprocessor macros and output variables: 1. Define ‘C_GETLOADAVG’. 2. Define ‘SVR4’, ‘DGUX’, ‘UMAX’, or ‘UMAX4_3’ if on those systems. 3. If ‘nlist.h’ is found, define ‘HAVE_NLIST_H’. 4. If ‘struct nlist’ has an ‘n_un.n_name’ member, define ‘HAVE_STRUCT_NLIST_N_UN_N_NAME’. The obsolete symbol ‘NLIST_NAME_UNION’ is still defined, but do not depend upon it. 5. Programs may need to be installed set-group-ID (or set-user-ID) for ‘getloadavg’ to work. 
In this case, define ‘GETLOADAVG_PRIVILEGED’, set the output variable ‘NEED_SETGID’ to ‘true’ (and otherwise to ‘false’), and set ‘KMEM_GROUP’ to the name of the group that should own the installed program. The ‘AC_FUNC_GETLOADAVG’ macro is obsolescent. New programs should use Gnulib’s ‘getloadavg’ module. *Note Gnulib::. -- Macro: AC_FUNC_GETMNTENT Check for ‘getmntent’ in the standard C library, and then in the ‘sun’, ‘seq’, and ‘gen’ libraries, for UNICOS, IRIX 4, PTX, and UnixWare, respectively. Then, if ‘getmntent’ is available, define ‘HAVE_GETMNTENT’ and set ‘ac_cv_func_getmntent’ to ‘yes’. Otherwise set ‘ac_cv_func_getmntent’ to ‘no’. The result of this macro can be overridden by setting the cache variable ‘ac_cv_search_getmntent’. The ‘AC_FUNC_GETMNTENT’ macro is obsolescent. New programs should use Gnulib’s ‘mountlist’ module. *Note Gnulib::. -- Macro: AC_FUNC_GETPGRP Define ‘GETPGRP_VOID’ if it is an error to pass 0 to ‘getpgrp’; this is the Posix behavior. On older BSD systems, you must pass 0 to ‘getpgrp’, as it takes an argument and behaves like Posix’s ‘getpgid’. #ifdef GETPGRP_VOID pid = getpgrp (); #else pid = getpgrp (0); #endif This macro does not check whether ‘getpgrp’ exists at all; if you need to work in that situation, first call ‘AC_CHECK_FUNC’ for ‘getpgrp’. The result of this macro is cached in the ‘ac_cv_func_getpgrp_void’ variable. This macro is obsolescent, as current systems have a ‘getpgrp’ whose signature conforms to Posix. New programs need not use this macro. -- Macro: AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK If ‘link’ is a symbolic link, then ‘lstat’ should treat ‘link/’ the same as ‘link/.’. However, many older ‘lstat’ implementations incorrectly ignore trailing slashes. It is safe to assume that if ‘lstat’ incorrectly ignores trailing slashes, then other symbolic-link-aware functions like ‘unlink’ also incorrectly ignore trailing slashes. 
If ‘lstat’ behaves properly, define ‘LSTAT_FOLLOWS_SLASHED_SYMLINK’, otherwise require an ‘AC_LIBOBJ’ replacement of ‘lstat’. The result of this macro is cached in the ‘ac_cv_func_lstat_dereferences_slashed_symlink’ variable. The ‘AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK’ macro is obsolescent. New programs should use Gnulib’s ‘lstat’ module. *Note Gnulib::. -- Macro: AC_FUNC_MALLOC If the ‘malloc’ function is compatible with the GNU C library ‘malloc’ (i.e., ‘malloc (0)’ returns a valid pointer), define ‘HAVE_MALLOC’ to 1. Otherwise define ‘HAVE_MALLOC’ to 0, ask for an ‘AC_LIBOBJ’ replacement for ‘malloc’, and define ‘malloc’ to ‘rpl_malloc’ so that the native ‘malloc’ is not used in the main project. Typically, the replacement file ‘malloc.c’ should look like (note the ‘#undef malloc’): #include <config.h> #undef malloc #include <sys/types.h> void *malloc (); /* Allocate an N-byte block of memory from the heap. If N is zero, allocate a 1-byte block. */ void * rpl_malloc (size_t n) { if (n == 0) n = 1; return malloc (n); } The result of this macro is cached in the ‘ac_cv_func_malloc_0_nonnull’ variable. If you don’t want to maintain a ‘malloc.c’ file in your package manually, you can instead use the Gnulib module ‘malloc-gnu’. -- Macro: AC_FUNC_MBRTOWC Define ‘HAVE_MBRTOWC’ to 1 if the function ‘mbrtowc’ and the type ‘mbstate_t’ are properly declared. The result of this macro is cached in the ‘ac_cv_func_mbrtowc’ variable. The Gnulib module ‘mbrtowc’ not only ensures that the function is declared, but also works around other portability problems of this function. -- Macro: AC_FUNC_MEMCMP If the ‘memcmp’ function is not available, or does not work on 8-bit data (like the one on SunOS 4.1.3), or fails when comparing 16 bytes or more and with at least one buffer not starting on a 4-byte boundary (such as the one on NeXT x86 OpenStep), require an ‘AC_LIBOBJ’ replacement for ‘memcmp’. The result of this macro is cached in the ‘ac_cv_func_memcmp_working’ variable. 
This macro is obsolescent, as current systems have a working ‘memcmp’. New programs need not use this macro. -- Macro: AC_FUNC_MKTIME If the ‘mktime’ function is not available, or does not work correctly, require an ‘AC_LIBOBJ’ replacement for ‘mktime’. For the purposes of this test, ‘mktime’ should conform to the Posix standard and should be the inverse of ‘localtime’. The result of this macro is cached in the ‘ac_cv_func_working_mktime’ variable. The ‘AC_FUNC_MKTIME’ macro is obsolescent. New programs should use Gnulib’s ‘mktime’ module. *Note Gnulib::. -- Macro: AC_FUNC_MMAP If the ‘mmap’ function exists and works correctly, define ‘HAVE_MMAP’. This checks only private fixed mapping of already-mapped memory. The result of this macro is cached in the ‘ac_cv_func_mmap_fixed_mapped’ variable. Note: This macro asks for more than what an average program needs from ‘mmap’. In particular, the use of ‘MAP_FIXED’ fails on HP-UX 11, whereas ‘mmap’ otherwise works fine on this platform. -- Macro: AC_FUNC_OBSTACK If the obstacks are found, define ‘HAVE_OBSTACK’, else require an ‘AC_LIBOBJ’ replacement for ‘obstack’. The result of this macro is cached in the ‘ac_cv_func_obstack’ variable. The ‘AC_FUNC_OBSTACK’ macro is obsolescent. New programs should use Gnulib’s ‘obstack’ module. *Note Gnulib::. -- Macro: AC_FUNC_REALLOC If the ‘realloc’ function is compatible with the GNU C library ‘realloc’ (i.e., ‘realloc (NULL, 0)’ returns a valid pointer), define ‘HAVE_REALLOC’ to 1. Otherwise define ‘HAVE_REALLOC’ to 0, ask for an ‘AC_LIBOBJ’ replacement for ‘realloc’, and define ‘realloc’ to ‘rpl_realloc’ so that the native ‘realloc’ is not used in the main project. See ‘AC_FUNC_MALLOC’ for details. The result of this macro is cached in the ‘ac_cv_func_realloc_0_nonnull’ variable. If you don’t want to maintain a ‘realloc.c’ file in your package manually, you can instead use the Gnulib module ‘realloc-gnu’. 
-- Macro: AC_FUNC_SELECT_ARGTYPES Determines the correct type to be passed for each of the ‘select’ function’s arguments, and defines those types in ‘SELECT_TYPE_ARG1’, ‘SELECT_TYPE_ARG234’, and ‘SELECT_TYPE_ARG5’ respectively. ‘SELECT_TYPE_ARG1’ defaults to ‘int’, ‘SELECT_TYPE_ARG234’ defaults to ‘int *’, and ‘SELECT_TYPE_ARG5’ defaults to ‘struct timeval *’. This macro is obsolescent, as current systems have a ‘select’ whose signature conforms to Posix. New programs need not use this macro. -- Macro: AC_FUNC_SETPGRP If ‘setpgrp’ takes no argument (the Posix version), define ‘SETPGRP_VOID’. Otherwise, it is the BSD version, which takes two process IDs as arguments. This macro does not check whether ‘setpgrp’ exists at all; if you need to work in that situation, first call ‘AC_CHECK_FUNC’ for ‘setpgrp’. This macro also does not check for the Solaris variant of ‘setpgrp’, which returns a ‘pid_t’ instead of an ‘int’; portable code should only use the return value by comparing it against ‘-1’ to check for errors. The result of this macro is cached in the ‘ac_cv_func_setpgrp_void’ variable. This macro is obsolescent, as all forms of ‘setpgrp’ are also obsolescent. New programs should use the Posix function ‘setpgid’, which takes two process IDs as arguments (like the BSD ‘setpgrp’). -- Macro: AC_FUNC_STAT -- Macro: AC_FUNC_LSTAT Determine whether ‘stat’ or ‘lstat’ have the bug that it succeeds when given the zero-length file name as argument. The ‘stat’ and ‘lstat’ from SunOS 4.1.4 and the Hurd (as of 1998-11-01) do this. If it does, then define ‘HAVE_STAT_EMPTY_STRING_BUG’ (or ‘HAVE_LSTAT_EMPTY_STRING_BUG’) and ask for an ‘AC_LIBOBJ’ replacement of it. The results of these macros are cached in the ‘ac_cv_func_stat_empty_string_bug’ and the ‘ac_cv_func_lstat_empty_string_bug’ variables, respectively. These macros are obsolescent, as no current systems have the bug. New programs need not use these macros. 
-- Macro: AC_FUNC_STRCOLL If the ‘strcoll’ function exists and works correctly, define ‘HAVE_STRCOLL’. This does a bit more than ‘AC_CHECK_FUNCS(strcoll)’, because some systems have incorrect definitions of ‘strcoll’ that should not be used. But it does not check against a known bug of this function on Solaris 10. The result of this macro is cached in the ‘ac_cv_func_strcoll_works’ variable. -- Macro: AC_FUNC_STRERROR_R If ‘strerror_r’ is available, define ‘HAVE_STRERROR_R’, and if it is declared, define ‘HAVE_DECL_STRERROR_R’. If it returns a ‘char *’ message, define ‘STRERROR_R_CHAR_P’; otherwise it returns an ‘int’ error number. The Thread-Safe Functions option of Posix requires ‘strerror_r’ to return ‘int’, but many systems (including, for example, version 2.2.4 of the GNU C Library) return a ‘char *’ value that is not necessarily equal to the buffer argument. The result of this macro is cached in the ‘ac_cv_func_strerror_r_char_p’ variable. The Gnulib module ‘strerror_r’ not only ensures that the function has the return type specified by Posix, but also works around other portability problems of this function. -- Macro: AC_FUNC_STRFTIME Check for ‘strftime’ in the ‘intl’ library, for SCO Unix. Then, if ‘strftime’ is available, define ‘HAVE_STRFTIME’. This macro is obsolescent, as no current systems require the ‘intl’ library for ‘strftime’. New programs need not use this macro. -- Macro: AC_FUNC_STRTOD If the ‘strtod’ function does not exist or doesn’t work correctly, ask for an ‘AC_LIBOBJ’ replacement of ‘strtod’. In this case, because ‘strtod.c’ is likely to need ‘pow’, set the output variable ‘POW_LIB’ to the extra library needed. This macro caches its result in the ‘ac_cv_func_strtod’ variable and depends upon the result in the ‘ac_cv_func_pow’ variable. The ‘AC_FUNC_STRTOD’ macro is obsolescent. New programs should use Gnulib’s ‘strtod’ module. *Note Gnulib::. 
-- Macro: AC_FUNC_STRTOLD If the ‘strtold’ function exists and conforms to C99 or later, define ‘HAVE_STRTOLD’. This macro caches its result in the ‘ac_cv_func_strtold’ variable. The Gnulib module ‘strtold’ not only ensures that the function exists, but also works around other portability problems of this function. -- Macro: AC_FUNC_STRNLEN If the ‘strnlen’ function is not available, or is buggy (like the one from AIX 4.3), require an ‘AC_LIBOBJ’ replacement for it. This macro caches its result in the ‘ac_cv_func_strnlen_working’ variable. The ‘AC_FUNC_STRNLEN’ macro is obsolescent. New programs should use Gnulib’s ‘strnlen’ module. *Note Gnulib::. -- Macro: AC_FUNC_UTIME_NULL If ‘utime (FILE, NULL)’ sets FILE’s timestamp to the present, define ‘HAVE_UTIME_NULL’. This macro caches its result in the ‘ac_cv_func_utime_null’ variable. This macro is obsolescent, as all current systems have a ‘utime’ that behaves this way. New programs need not use this macro. -- Macro: AC_FUNC_VPRINTF If ‘vprintf’ is found, define ‘HAVE_VPRINTF’. Otherwise, if ‘_doprnt’ is found, define ‘HAVE_DOPRNT’. (If ‘vprintf’ is available, you may assume that ‘vfprintf’ and ‘vsprintf’ are also available.) This macro is obsolescent, as all current systems have ‘vprintf’. New programs need not use this macro. -- Macro: AC_REPLACE_FNMATCH If the ‘fnmatch’ function does not conform to Posix (see ‘AC_FUNC_FNMATCH’), ask for its ‘AC_LIBOBJ’ replacement. The files ‘fnmatch.c’, ‘fnmatch_loop.c’, and ‘fnmatch_.h’ in the ‘AC_LIBOBJ’ replacement directory are assumed to contain a copy of the source code of GNU ‘fnmatch’. If necessary, this source code is compiled as an ‘AC_LIBOBJ’ replacement, and the ‘fnmatch_.h’ file is linked to ‘fnmatch.h’ so that it can be included in place of the system ‘’. This macro caches its result in the ‘ac_cv_func_fnmatch_works’ variable. This macro is obsolescent, as it assumes the use of particular source files. 
New programs should use Gnulib’s ‘fnmatch-posix’ module, which provides this macro along with the source files. *Note Gnulib::.  File: autoconf.info, Node: Generic Functions, Prev: Particular Functions, Up: Library Functions 5.5.3 Generic Function Checks ----------------------------- These macros are used to find functions not covered by the “particular” test macros. If the functions might be in libraries other than the default C library, first call ‘AC_CHECK_LIB’ for those libraries. If you need to check the behavior of a function as well as find out whether it is present, you have to write your own test for it (*note Writing Tests::). -- Macro: AC_CHECK_FUNC (FUNCTION, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) If C function FUNCTION is available, run shell commands ACTION-IF-FOUND, otherwise ACTION-IF-NOT-FOUND. If you just want to define a symbol if the function is available, consider using ‘AC_CHECK_FUNCS’ instead. This macro checks for functions with C linkage even when ‘AC_LANG(C++)’ has been called, since C is more standardized than C++. (*note Language Choice::, for more information about selecting the language for checks.) This macro caches its result in the ‘ac_cv_func_FUNCTION’ variable. -- Macro: AC_CHECK_FUNCS (FUNCTION..., [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) For each FUNCTION enumerated in the blank-or-newline-separated argument list, define ‘HAVE_FUNCTION’ (in all capitals) if it is available. If ACTION-IF-FOUND is given, it is additional shell code to execute when one of the functions is found. You can give it a value of ‘break’ to break out of the loop on the first match. If ACTION-IF-NOT-FOUND is given, it is executed when one of the functions is not found. Results are cached for each FUNCTION as in ‘AC_CHECK_FUNC’. -- Macro: AC_CHECK_FUNCS_ONCE (FUNCTION...) For each FUNCTION enumerated in the blank-or-newline-separated argument list, define ‘HAVE_FUNCTION’ (in all capitals) if it is available. 
This is a once-only variant of ‘AC_CHECK_FUNCS’. It generates the checking code at most once, so that ‘configure’ is smaller and faster; but the checks cannot be conditionalized and are always done once, early during the ‘configure’ run. Autoconf follows a philosophy that was formed over the years by those who have struggled for portability: isolate the portability issues in specific files, and then program as if you were in a Posix environment. Some functions may be missing or unfixable, and your package must be ready to replace them. Suitable replacements for many such problem functions are available from Gnulib (*note Gnulib::). -- Macro: AC_LIBOBJ (FUNCTION) Specify that ‘FUNCTION.c’ must be included in the executables to replace a missing or broken implementation of FUNCTION. Technically, it adds ‘FUNCTION.$ac_objext’ to the output variable ‘LIBOBJS’ if it is not already in, and calls ‘AC_LIBSOURCE’ for ‘FUNCTION.c’. You should not directly change ‘LIBOBJS’, since this is not traceable. -- Macro: AC_LIBSOURCE (FILE) Specify that FILE might be needed to compile the project. If you need to know what files might be needed by a ‘configure.ac’, you should trace ‘AC_LIBSOURCE’. FILE must be a literal. This macro is called automatically from ‘AC_LIBOBJ’, but you must call it explicitly if you pass a shell variable to ‘AC_LIBOBJ’. In that case, since shell variables cannot be traced statically, you must pass to ‘AC_LIBSOURCE’ any possible files that the shell variable might cause ‘AC_LIBOBJ’ to need. For example, if you want to pass a variable ‘$foo_or_bar’ to ‘AC_LIBOBJ’ that holds either ‘"foo"’ or ‘"bar"’, you should do: AC_LIBSOURCE([foo.c]) AC_LIBSOURCE([bar.c]) AC_LIBOBJ([$foo_or_bar]) There is usually a way to avoid this, however, and you are encouraged to simply call ‘AC_LIBOBJ’ with literal arguments. 
Note that this macro replaces the obsolete ‘AC_LIBOBJ_DECL’, with slightly different semantics: the old macro took the function name, e.g., ‘foo’, as its argument rather than the file name. -- Macro: AC_LIBSOURCES (FILES) Like ‘AC_LIBSOURCE’, but accepts one or more FILES in a comma-separated M4 list. Thus, the above example might be rewritten: AC_LIBSOURCES([foo.c, bar.c]) AC_LIBOBJ([$foo_or_bar]) -- Macro: AC_CONFIG_LIBOBJ_DIR (DIRECTORY) Specify that ‘AC_LIBOBJ’ replacement files are to be found in DIRECTORY, a name relative to the top level of the source tree. The replacement directory defaults to ‘.’, the top level directory, and the most typical value is ‘lib’, corresponding to ‘AC_CONFIG_LIBOBJ_DIR([lib])’. ‘configure’ might need to know the replacement directory for the following reasons: (i) some checks use the replacement files, (ii) some macros bypass broken system headers by installing links to the replacement headers (iii) when used in conjunction with Automake, within each makefile, DIRECTORY is used as a relative path from ‘$(top_srcdir)’ to each object named in ‘LIBOBJS’ and ‘LTLIBOBJS’, etc. It is common to merely check for the existence of a function, and ask for its ‘AC_LIBOBJ’ replacement if missing. The following macro is a convenient shorthand. -- Macro: AC_REPLACE_FUNCS (FUNCTION...) Like ‘AC_CHECK_FUNCS’, but uses ‘AC_LIBOBJ(FUNCTION)’ as ACTION-IF-NOT-FOUND. You can declare your replacement function by enclosing the prototype in ‘#ifndef HAVE_FUNCTION’. If the system has the function, it probably declares it in a header file you should be including, so you shouldn’t redeclare it lest your declaration conflict.  File: autoconf.info, Node: Header Files, Next: Declarations, Prev: Library Functions, Up: Existing Tests 5.6 Header Files ================ The following macros check for the presence of certain C header files. 
If there is no macro specifically defined to check for a header file you need, and you don’t need to check for any special properties of it, then you can use one of the general header-file check macros. * Menu: * Header Portability:: Collected knowledge on common headers * Particular Headers:: Special handling to find certain headers * Generic Headers:: How to find other headers  File: autoconf.info, Node: Header Portability, Next: Particular Headers, Up: Header Files 5.6.1 Portability of Headers ---------------------------- This section documents some collected knowledge about common headers, and the problems they cause. By definition, this list always requires additions. A much more complete list is maintained by the Gnulib project (*note Gnulib::), covering *note Posix Headers: (gnulib)Header File Substitutes. and *note Glibc Headers: (gnulib)Glibc Header File Substitutes. Please help us keep the Gnulib list as complete as possible. When we say that a header “may require” some set of other headers, we mean that it may be necessary for you to manually include those other headers first, or the contents of the header under test will fail to compile. When checking for these headers, you must provide the potentially-required headers in the INCLUDES argument to ‘AC_CHECK_HEADER’ or ‘AC_CHECK_HEADERS’, or the check will fail spuriously. ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::) arranges to include a number of common requirements and should normally come first in your INCLUDES. 
For example, ‘net/if.h’ may require ‘sys/types.h’, ‘sys/socket.h’, or both, and ‘AC_INCLUDES_DEFAULT’ handles ‘sys/types.h’ but not ‘sys/socket.h’, so you should check for it like this: AC_CHECK_HEADERS([sys/socket.h]) AC_CHECK_HEADERS([net/if.h], [], [], [AC_INCLUDES_DEFAULT[ #ifdef HAVE_SYS_SOCKET_H # include #endif ]]) Note that the example mixes single quoting (for‘AC_INCLUDES_DEFAULT’, so that it gets expanded) and double quoting (to ensure that each preprocessor ‘#’ gets treated as a literal string rather than a comment). ‘limits.h’ In C99 and later, ‘limits.h’ defines ‘LLONG_MIN’, ‘LLONG_MAX’, and ‘ULLONG_MAX’, but many almost-C99 environments (e.g., default GCC 4.0.2 + glibc 2.4) do not define them. ‘memory.h’ This header file is obsolete; use ‘string.h’ instead. ‘strings.h’ On some systems, this is the only header that declares ‘strcasecmp’, ‘strncasecmp’, and ‘ffs’. This header may or may not include ‘string.h’ for you. However, on all recent systems it is safe to include both ‘string.h’ and ‘strings.h’, in either order, in the same source file. ‘inttypes.h’ vs. ‘stdint.h’ C99 specifies that ‘inttypes.h’ includes ‘stdint.h’, so there’s no need to include ‘stdint.h’ separately in a standard environment. However, some implementations have ‘inttypes.h’ but not ‘stdint.h’ (e.g., Solaris 7), and some have ‘stdint.h’ but not ‘inttypes.h’ (e.g. MSVC 2012). Therefore, it is necessary to check for each and include each only if available. ‘linux/irda.h’ This header may require ‘linux/types.h’ and/or ‘sys/socket.h’. ‘linux/random.h’ This header may require ‘linux/types.h’. ‘net/if.h’ This header may require ‘sys/types.h’ and/or ‘sys/socket.h’. ‘netinet/if_ether.h’ This header may require some combination of ‘sys/types.h’, ‘sys/socket.h’, ‘netinet/in.h’, and ‘net/if.h’. ‘sys/mount.h’ This header may require ‘sys/params.h’. ‘sys/ptem.h’ This header may require ‘sys/stream.h’. ‘sys/socket.h’ This header may require ‘sys/types.h’. 
‘sys/ucred.h’ This header may require ‘sys/types.h’. ‘X11/extensions/scrnsaver.h’ Using XFree86, this header requires ‘X11/Xlib.h’, which is probably so required that you might not even consider looking for it.  File: autoconf.info, Node: Particular Headers, Next: Generic Headers, Prev: Header Portability, Up: Header Files 5.6.2 Particular Header Checks ------------------------------ These macros check for particular system header files—whether they exist, and in some cases whether they declare certain symbols. -- Macro: AC_CHECK_HEADER_STDBOOL Check whether ‘stdbool.h’ exists and conforms to C99 or later, and cache the result in the ‘ac_cv_header_stdbool_h’ variable. If the type ‘_Bool’ is defined, define ‘HAVE__BOOL’ to 1. This macro is intended for use by Gnulib (*note Gnulib::) and other packages that supply a substitute ‘stdbool.h’ on platforms lacking a conforming one. The ‘AC_HEADER_STDBOOL’ macro is better for code that explicitly checks for ‘stdbool.h’. -- Macro: AC_HEADER_ASSERT Check whether to enable assertions in the style of ‘assert.h’. Assertions are enabled by default, but the user can override this by invoking ‘configure’ with the ‘--disable-assert’ option. -- Macro: AC_HEADER_DIRENT Check for the following header files. 
For the first one that is found and defines ‘DIR’, define the listed C preprocessor macro: ‘dirent.h’ ‘HAVE_DIRENT_H’ ‘sys/ndir.h’ ‘HAVE_SYS_NDIR_H’ ‘sys/dir.h’ ‘HAVE_SYS_DIR_H’ ‘ndir.h’ ‘HAVE_NDIR_H’ The directory-library declarations in your source code should look something like the following: #include #ifdef HAVE_DIRENT_H # include # define NAMLEN(dirent) strlen ((dirent)->d_name) #else # define dirent direct # define NAMLEN(dirent) ((dirent)->d_namlen) # ifdef HAVE_SYS_NDIR_H # include # endif # ifdef HAVE_SYS_DIR_H # include # endif # ifdef HAVE_NDIR_H # include # endif #endif Using the above declarations, the program would declare variables to be of type ‘struct dirent’, not ‘struct direct’, and would access the length of a directory entry name by passing a pointer to a ‘struct dirent’ to the ‘NAMLEN’ macro. This macro also checks for the SCO Xenix ‘dir’ and ‘x’ libraries. This macro is obsolescent, as all current systems with directory libraries have ‘’. New programs need not use this macro. Also see ‘AC_STRUCT_DIRENT_D_INO’ and ‘AC_STRUCT_DIRENT_D_TYPE’ (*note Particular Structures::). -- Macro: AC_HEADER_MAJOR Detect the headers required to use ‘makedev’, ‘major’, and ‘minor’. These functions may be defined by ‘sys/mkdev.h’, ‘sys/sysmacros.h’, or ‘sys/types.h’. ‘AC_HEADER_MAJOR’ defines ‘MAJOR_IN_MKDEV’ if they are in ‘sys/mkdev.h’, or ‘MAJOR_IN_SYSMACROS’ if they are in ‘sys/sysmacros.h’. If neither macro is defined, they are either in ‘sys/types.h’ or unavailable. To properly use these functions, your code should contain something like: #include #ifdef MAJOR_IN_MKDEV # include #elif defined MAJOR_IN_SYSMACROS # include #endif Note: Configure scripts built with Autoconf 2.69 or earlier will not detect a problem if ‘sys/types.h’ contains definitions of ‘major’, ‘minor’, and/or ‘makedev’ that trigger compiler warnings upon use. This is known to occur with GNU libc 2.25, where those definitions are being deprecated to reduce namespace pollution. 
If it is not practical to use Autoconf 2.70 to regenerate the configure script of affected software, you can work around the problem by setting ‘ac_cv_header_sys_types_h_makedev=no’, as an argument to ‘configure’ or as part of a ‘config.site’ site default file (*note Site Defaults::). -- Macro: AC_HEADER_RESOLV Checks for header ‘resolv.h’, checking for prerequisites first. To properly use ‘resolv.h’, your code should contain something like the following: #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_NETINET_IN_H # include /* inet_ functions / structs */ #endif #ifdef HAVE_ARPA_NAMESER_H # include /* DNS HEADER struct */ #endif #ifdef HAVE_NETDB_H # include #endif #include -- Macro: AC_HEADER_STAT If the macros ‘S_ISDIR’, ‘S_ISREG’, etc. defined in ‘sys/stat.h’ do not work properly (returning false positives), define ‘STAT_MACROS_BROKEN’. This is the case on Tektronix UTekV, Amdahl UTS and Motorola System V/88. This macro is obsolescent, as no current systems have the bug. New programs need not use this macro. -- Macro: AC_HEADER_STDBOOL If ‘stdbool.h’ exists and conforms to C99 or later, define ‘HAVE_STDBOOL_H’ to 1; if the type ‘_Bool’ is defined, define ‘HAVE__BOOL’ to 1. To fulfill the standard’s requirements, your program could contain the following code: #ifdef HAVE_STDBOOL_H # include #else # ifndef HAVE__BOOL # ifdef __cplusplus typedef bool _Bool; # else # define _Bool signed char # endif # endif # define bool _Bool # define false 0 # define true 1 # define __bool_true_false_are_defined 1 #endif Alternatively you can use the ‘stdbool’ package of Gnulib (*note Gnulib::). It simplifies your code so that it can say just ‘#include ’, and it adds support for less-common platforms. This macro caches its result in the ‘ac_cv_header_stdbool_h’ variable. This macro differs from ‘AC_CHECK_HEADER_STDBOOL’ only in that it defines ‘HAVE_STDBOOL_H’ whereas ‘AC_CHECK_HEADER_STDBOOL’ does not. -- Macro: AC_HEADER_STDC This macro is obsolescent. 
Its sole effect is to make sure that all the headers that are included by ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), but not part of ISO C90, have been checked for. All hosted environments that are still of interest for portable code provide all of the headers specified in ISO C90 (as amended in 1995). -- Macro: AC_HEADER_SYS_WAIT If ‘sys/wait.h’ exists and is compatible with Posix, define ‘HAVE_SYS_WAIT_H’. Incompatibility can occur if ‘sys/wait.h’ does not exist, or if it uses the old BSD ‘union wait’ instead of ‘int’ to store a status value. If ‘sys/wait.h’ is not Posix compatible, then instead of including it, define the Posix macros with their usual interpretations. Here is an example: #include #ifdef HAVE_SYS_WAIT_H # include #endif #ifndef WEXITSTATUS # define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8) #endif #ifndef WIFEXITED # define WIFEXITED(stat_val) (((stat_val) & 255) == 0) #endif This macro caches its result in the ‘ac_cv_header_sys_wait_h’ variable. This macro is obsolescent, as current systems are compatible with Posix. New programs need not use this macro. ‘_POSIX_VERSION’ is defined when ‘unistd.h’ is included on Posix systems. If there is no ‘unistd.h’, it is definitely not a Posix system. However, some non-Posix systems do have ‘unistd.h’. The way to check whether the system supports Posix is: #ifdef HAVE_UNISTD_H # include # include #endif #ifdef _POSIX_VERSION /* Code for Posix systems. */ #endif -- Macro: AC_HEADER_TIOCGWINSZ If the use of ‘TIOCGWINSZ’ requires ‘’, then define ‘GWINSZ_IN_SYS_IOCTL’. Otherwise ‘TIOCGWINSZ’ can be found in ‘’. Use: #ifdef HAVE_TERMIOS_H # include #endif #ifdef GWINSZ_IN_SYS_IOCTL # include #endif  File: autoconf.info, Node: Generic Headers, Prev: Particular Headers, Up: Header Files 5.6.3 Generic Header Checks --------------------------- These macros are used to find system header files not covered by the “particular” test macros. 
If you need to check the contents of a header as well as find out whether it is present, you have to write your own test for it (*note Writing Tests::). -- Macro: AC_CHECK_HEADER (HEADER-FILE, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES]) If the system header file HEADER-FILE is compilable, execute shell commands ACTION-IF-FOUND, otherwise execute ACTION-IF-NOT-FOUND. If you just want to define a symbol if the header file is available, consider using ‘AC_CHECK_HEADERS’ instead. INCLUDES should be the appropriate “prerequisite” code, i.e. whatever might be required to appear above ‘#include ’ for it to compile without error. This can be anything, but will normally be additional ‘#include’ directives. If INCLUDES is omitted or empty, ‘configure’ will use the contents of the macro ‘AC_INCLUDES_DEFAULT’. *Note Default Includes::. This macro used to check only for the _presence_ of a header, not whether its contents were acceptable to the compiler. Some older ‘configure’ scripts rely on this behavior, so it is still available by specifying ‘-’ as INCLUDES. This mechanism is deprecated as of Autoconf 2.70; situations where a preprocessor-only check is required should use ‘AC_PREPROC_IFELSE’. *Note Running the Preprocessor::. This macro caches its result in the ‘ac_cv_header_HEADER-FILE’ variable, with characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_HEADERS (HEADER-FILE..., [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES]) For each given system header file HEADER-FILE in the blank-separated argument list that exists, define ‘HAVE_HEADER-FILE’ (in all capitals). If ACTION-IF-FOUND is given, it is additional shell code to execute when one of the header files is found. You can give it a value of ‘break’ to break out of the loop on the first match. If ACTION-IF-NOT-FOUND is given, it is executed when one of the header files is not found. 
INCLUDES is interpreted as in ‘AC_CHECK_HEADER’, in order to choose the set of preprocessor directives supplied before the header under test. This macro caches its result in the ‘ac_cv_header_HEADER-FILE’ variable, with characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_HEADERS_ONCE (HEADER-FILE...) For each given system header file HEADER-FILE in the blank-separated argument list that exists, define ‘HAVE_HEADER-FILE’ (in all capitals). If you do not need the full power of ‘AC_CHECK_HEADERS’, this variant generates smaller, faster ‘configure’ files. All headers passed to ‘AC_CHECK_HEADERS_ONCE’ are checked for in one pass, early during the ‘configure’ run. The checks cannot be conditionalized, you cannot specify an ACTION-IF-FOUND or ACTION-IF-NOT-FOUND, and ‘AC_INCLUDES_DEFAULT’ is always used for the prerequisites. In previous versions of Autoconf, these macros merely checked whether the header was accepted by the preprocessor. This was changed because the old test was inappropriate for typical uses. Headers are typically used to compile, not merely to preprocess, and the old behavior sometimes accepted headers that clashed at compile-time (*note Present But Cannot Be Compiled::). If for some reason it is inappropriate to check whether a header is compilable, you should use ‘AC_PREPROC_IFELSE’ (*note Running the Preprocessor::) instead of these macros. Requiring each header to compile improves the robustness of the test, but it also requires you to make sure that the INCLUDES are correct. Most system headers nowadays make sure to ‘#include’ whatever they require, or else have their dependencies satisfied by ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), but *note Header Portability::, for known exceptions. 
In general, if you are looking for ‘bar.h’, which requires that ‘foo.h’ be included first if it exists, you should do something like this: AC_CHECK_HEADERS([foo.h]) AC_CHECK_HEADERS([bar.h], [], [], [#ifdef HAVE_FOO_H # include #endif ])  File: autoconf.info, Node: Declarations, Next: Structures, Prev: Header Files, Up: Existing Tests 5.7 Declarations ================ The following macros check for the declaration of variables and functions. If there is no macro specifically defined to check for a symbol you need, then you can use the general macros (*note Generic Declarations::) or, for more complex tests, you may use ‘AC_COMPILE_IFELSE’ (*note Running the Compiler::). * Menu: * Particular Declarations:: Macros to check for certain declarations * Generic Declarations:: How to find other declarations  File: autoconf.info, Node: Particular Declarations, Next: Generic Declarations, Up: Declarations 5.7.1 Particular Declaration Checks ----------------------------------- There are no specific macros for declarations.  File: autoconf.info, Node: Generic Declarations, Prev: Particular Declarations, Up: Declarations 5.7.2 Generic Declaration Checks -------------------------------- These macros are used to find declarations not covered by the “particular” test macros. -- Macro: AC_CHECK_DECL (SYMBOL, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) If SYMBOL (a function, variable, or constant) is not declared in INCLUDES and a declaration is needed, run the shell commands ACTION-IF-NOT-FOUND, otherwise ACTION-IF-FOUND. INCLUDES is a series of include directives, defaulting to ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), which are used prior to the declaration under test. This macro actually tests whether SYMBOL is defined as a macro or can be used as an r-value, not whether it is really declared, because it is much safer to avoid introducing extra declarations when they are not needed. 
In order to facilitate use of C++ and overloaded function declarations, it is possible to specify function argument types in parentheses for types which can be zero-initialized: AC_CHECK_DECL([basename(char *)]) This macro caches its result in the ‘ac_cv_have_decl_SYMBOL’ variable, with characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_DECLS (SYMBOLS, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) For each of the SYMBOLS (_comma_-separated list with optional function argument types for C++ overloads), define ‘HAVE_DECL_SYMBOL’ (in all capitals) to ‘1’ if SYMBOL is declared, otherwise to ‘0’. If ACTION-IF-NOT-FOUND is given, it is additional shell code to execute when one of the function declarations is needed, otherwise ACTION-IF-FOUND is executed. INCLUDES is a series of include directives, defaulting to ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), which are used prior to the declarations under test. This macro uses an M4 list as first argument: AC_CHECK_DECLS([strdup]) AC_CHECK_DECLS([strlen]) AC_CHECK_DECLS([malloc, realloc, calloc, free]) AC_CHECK_DECLS([j0], [], [], [[#include ]]) AC_CHECK_DECLS([[basename(char *)], [dirname(char *)]]) Unlike the other ‘AC_CHECK_*S’ macros, when a SYMBOL is not declared, ‘HAVE_DECL_SYMBOL’ is defined to ‘0’ instead of leaving ‘HAVE_DECL_SYMBOL’ undeclared. When you are _sure_ that the check was performed, use ‘HAVE_DECL_SYMBOL’ in ‘#if’: #if !HAVE_DECL_SYMBOL extern char *symbol; #endif If the test may have not been performed, however, because it is safer _not_ to declare a symbol than to use a declaration that conflicts with the system’s one, you should use: #if defined HAVE_DECL_MALLOC && !HAVE_DECL_MALLOC void *malloc (size_t *s); #endif You fall into the second category only in extreme situations: either your files may be used without being configured, or they are used during the configuration. In most cases the traditional approach is enough. 
This macro caches its results in ‘ac_cv_have_decl_SYMBOL’ variables, with characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_DECLS_ONCE (SYMBOLS) For each of the SYMBOLS (_comma_-separated list), define ‘HAVE_DECL_SYMBOL’ (in all capitals) to ‘1’ if SYMBOL is declared in the default include files, otherwise to ‘0’. This is a once-only variant of ‘AC_CHECK_DECLS’. It generates the checking code at most once, so that ‘configure’ is smaller and faster; but the checks cannot be conditionalized and are always done once, early during the ‘configure’ run.  File: autoconf.info, Node: Structures, Next: Types, Prev: Declarations, Up: Existing Tests 5.8 Structures ============== The following macros check for the presence of certain members in C structures. If there is no macro specifically defined to check for a member you need, then you can use the general structure-member macros (*note Generic Structures::) or, for more complex tests, you may use ‘AC_COMPILE_IFELSE’ (*note Running the Compiler::). * Menu: * Particular Structures:: Macros to check for certain structure members * Generic Structures:: How to find other structure members  File: autoconf.info, Node: Particular Structures, Next: Generic Structures, Up: Structures 5.8.1 Particular Structure Checks --------------------------------- The following macros check for certain structures or structure members. -- Macro: AC_STRUCT_DIRENT_D_INO Perform all the actions of ‘AC_HEADER_DIRENT’ (*note Particular Headers::). Then, if ‘struct dirent’ contains a ‘d_ino’ member, define ‘HAVE_STRUCT_DIRENT_D_INO’. ‘HAVE_STRUCT_DIRENT_D_INO’ indicates only the presence of ‘d_ino’, not whether its contents are always reliable. Traditionally, a zero ‘d_ino’ indicated a deleted directory entry, though current systems hide this detail from the user and never return zero ‘d_ino’ values. Many current systems report an incorrect ‘d_ino’ for a directory entry that is a mount point. 
-- Macro: AC_STRUCT_DIRENT_D_TYPE Perform all the actions of ‘AC_HEADER_DIRENT’ (*note Particular Headers::). Then, if ‘struct dirent’ contains a ‘d_type’ member, define ‘HAVE_STRUCT_DIRENT_D_TYPE’. -- Macro: AC_STRUCT_ST_BLOCKS If ‘struct stat’ contains an ‘st_blocks’ member, define ‘HAVE_STRUCT_STAT_ST_BLOCKS’. Otherwise, require an ‘AC_LIBOBJ’ replacement of ‘fileblocks’. The former name, ‘HAVE_ST_BLOCKS’ is to be avoided, as its support will cease in the future. This macro caches its result in the ‘ac_cv_member_struct_stat_st_blocks’ variable. -- Macro: AC_STRUCT_TM If ‘time.h’ does not define ‘struct tm’, define ‘TM_IN_SYS_TIME’, which means that including ‘sys/time.h’ had better define ‘struct tm’. This macro is obsolescent, as ‘time.h’ defines ‘struct tm’ in current systems. New programs need not use this macro. -- Macro: AC_STRUCT_TIMEZONE Figure out how to get the current timezone. If ‘struct tm’ has a ‘tm_zone’ member, define ‘HAVE_STRUCT_TM_TM_ZONE’ (and the obsoleted ‘HAVE_TM_ZONE’). Otherwise, if the external array ‘tzname’ is found, define ‘HAVE_TZNAME’; if it is declared, define ‘HAVE_DECL_TZNAME’.  File: autoconf.info, Node: Generic Structures, Prev: Particular Structures, Up: Structures 5.8.2 Generic Structure Checks ------------------------------ These macros are used to find structure members not covered by the “particular” test macros. -- Macro: AC_CHECK_MEMBER (AGGREGATE.MEMBER, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) Check whether MEMBER is a member of the aggregate AGGREGATE. If no INCLUDES are specified, the default includes are used (*note Default Includes::). AC_CHECK_MEMBER([struct passwd.pw_gecos], [], [AC_MSG_ERROR([we need 'passwd.pw_gecos'])], [[#include ]]) You can use this macro for submembers: AC_CHECK_MEMBER(struct top.middle.bot) This macro caches its result in the ‘ac_cv_member_AGGREGATE_MEMBER’ variable, with characters not suitable for a variable name mapped to underscores. 
-- Macro: AC_CHECK_MEMBERS (MEMBERS, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) Check for the existence of each ‘AGGREGATE.MEMBER’ of MEMBERS using the previous macro. When MEMBER belongs to AGGREGATE, define ‘HAVE_AGGREGATE_MEMBER’ (in all capitals, with spaces and dots replaced by underscores). If ACTION-IF-FOUND is given, it is executed for each of the found members. If ACTION-IF-NOT-FOUND is given, it is executed for each of the members that could not be found. INCLUDES is a series of include directives, defaulting to ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), which are used prior to the members under test. This macro uses M4 lists: AC_CHECK_MEMBERS([struct stat.st_rdev, struct stat.st_blksize])  File: autoconf.info, Node: Types, Next: Compilers and Preprocessors, Prev: Structures, Up: Existing Tests 5.9 Types ========= The following macros check for C types, either builtin or typedefs. If there is no macro specifically defined to check for a type you need, and you don’t need to check for any special properties of it, then you can use a general type-check macro. * Menu: * Particular Types:: Special handling to find certain types * Generic Types:: How to find other types  File: autoconf.info, Node: Particular Types, Next: Generic Types, Up: Types 5.9.1 Particular Type Checks ---------------------------- These macros check for particular C types in ‘sys/types.h’, ‘stdlib.h’, ‘stdint.h’, ‘inttypes.h’ and others, if they exist. The Gnulib ‘stdint’ module is an alternate way to define many of these symbols; it is useful if you prefer your code to assume a C99-or-better environment. *Note Gnulib::. -- Macro: AC_TYPE_GETGROUPS Define ‘GETGROUPS_T’ to be whichever of ‘gid_t’ or ‘int’ is the base type of the array argument to ‘getgroups’. This macro caches the base type in the ‘ac_cv_type_getgroups’ variable. 
-- Macro: AC_TYPE_INT8_T If ‘stdint.h’ or ‘inttypes.h’ does not define the type ‘int8_t’, define ‘int8_t’ to a signed integer type that is exactly 8 bits wide and that uses two’s complement representation, if such a type exists. If you are worried about porting to hosts that lack such a type, you can use the results of this macro in C89-or-later code as follows: #if HAVE_STDINT_H # include #endif #if defined INT8_MAX || defined int8_t _code using int8_t_ #else _complicated alternative using >8-bit 'signed char'_ #endif This macro caches the type in the ‘ac_cv_c_int8_t’ variable. -- Macro: AC_TYPE_INT16_T This is like ‘AC_TYPE_INT8_T’, except for 16-bit integers. -- Macro: AC_TYPE_INT32_T This is like ‘AC_TYPE_INT8_T’, except for 32-bit integers. -- Macro: AC_TYPE_INT64_T This is like ‘AC_TYPE_INT8_T’, except for 64-bit integers. -- Macro: AC_TYPE_INTMAX_T If ‘stdint.h’ or ‘inttypes.h’ defines the type ‘intmax_t’, define ‘HAVE_INTMAX_T’. Otherwise, define ‘intmax_t’ to the widest signed integer type. -- Macro: AC_TYPE_INTPTR_T If ‘stdint.h’ or ‘inttypes.h’ defines the type ‘intptr_t’, define ‘HAVE_INTPTR_T’. Otherwise, define ‘intptr_t’ to a signed integer type wide enough to hold a pointer, if such a type exists. -- Macro: AC_TYPE_LONG_DOUBLE If the C compiler supports a working ‘long double’ type, define ‘HAVE_LONG_DOUBLE’. The ‘long double’ type might have the same range and precision as ‘double’. This macro caches its result in the ‘ac_cv_type_long_double’ variable. This macro is obsolescent, as current C compilers support ‘long double’. New programs need not use this macro. -- Macro: AC_TYPE_LONG_DOUBLE_WIDER If the C compiler supports a working ‘long double’ type with more range or precision than the ‘double’ type, define ‘HAVE_LONG_DOUBLE_WIDER’. This macro caches its result in the ‘ac_cv_type_long_double_wider’ variable. -- Macro: AC_TYPE_LONG_LONG_INT If the C compiler supports a working ‘long long int’ type, define ‘HAVE_LONG_LONG_INT’. 
However, this test does not test ‘long long int’ values in preprocessor ‘#if’ expressions, because too many compilers mishandle such expressions. *Note Preprocessor Arithmetic::. This macro caches its result in the ‘ac_cv_type_long_long_int’ variable. -- Macro: AC_TYPE_MBSTATE_T Define ‘HAVE_MBSTATE_T’ if ‘’ declares the ‘mbstate_t’ type. Also, define ‘mbstate_t’ to be a type if ‘’ does not declare it. This macro caches its result in the ‘ac_cv_type_mbstate_t’ variable. -- Macro: AC_TYPE_MODE_T Define ‘mode_t’ to a suitable type, if standard headers do not define it. This macro caches its result in the ‘ac_cv_type_mode_t’ variable. -- Macro: AC_TYPE_OFF_T Define ‘off_t’ to a suitable type, if standard headers do not define it. This macro caches its result in the ‘ac_cv_type_off_t’ variable. -- Macro: AC_TYPE_PID_T Define ‘pid_t’ to a suitable type, if standard headers do not define it. This macro caches its result in the ‘ac_cv_type_pid_t’ variable. -- Macro: AC_TYPE_SIZE_T Define ‘size_t’ to a suitable type, if standard headers do not define it. This macro caches its result in the ‘ac_cv_type_size_t’ variable. -- Macro: AC_TYPE_SSIZE_T Define ‘ssize_t’ to a suitable type, if standard headers do not define it. This macro caches its result in the ‘ac_cv_type_ssize_t’ variable. -- Macro: AC_TYPE_UID_T Define ‘uid_t’ and ‘gid_t’ to suitable types, if standard headers do not define them. This macro caches its result in the ‘ac_cv_type_uid_t’ variable. -- Macro: AC_TYPE_UINT8_T If ‘stdint.h’ or ‘inttypes.h’ does not define the type ‘uint8_t’, define ‘uint8_t’ to an unsigned integer type that is exactly 8 bits wide, if such a type exists. This is like ‘AC_TYPE_INT8_T’, except for unsigned integers. -- Macro: AC_TYPE_UINT16_T This is like ‘AC_TYPE_UINT8_T’, except for 16-bit integers. -- Macro: AC_TYPE_UINT32_T This is like ‘AC_TYPE_UINT8_T’, except for 32-bit integers. -- Macro: AC_TYPE_UINT64_T This is like ‘AC_TYPE_UINT8_T’, except for 64-bit integers. 
-- Macro: AC_TYPE_UINTMAX_T If ‘stdint.h’ or ‘inttypes.h’ defines the type ‘uintmax_t’, define ‘HAVE_UINTMAX_T’. Otherwise, define ‘uintmax_t’ to the widest unsigned integer type. -- Macro: AC_TYPE_UINTPTR_T If ‘stdint.h’ or ‘inttypes.h’ defines the type ‘uintptr_t’, define ‘HAVE_UINTPTR_T’. Otherwise, define ‘uintptr_t’ to an unsigned integer type wide enough to hold a pointer, if such a type exists. -- Macro: AC_TYPE_UNSIGNED_LONG_LONG_INT If the C compiler supports a working ‘unsigned long long int’ type, define ‘HAVE_UNSIGNED_LONG_LONG_INT’. However, this test does not test ‘unsigned long long int’ values in preprocessor ‘#if’ expressions, because too many compilers mishandle such expressions. *Note Preprocessor Arithmetic::. This macro caches its result in the ‘ac_cv_type_unsigned_long_long_int’ variable.  File: autoconf.info, Node: Generic Types, Prev: Particular Types, Up: Types 5.9.2 Generic Type Checks ------------------------- These macros are used to check for types not covered by the “particular” test macros. -- Macro: AC_CHECK_TYPE (TYPE, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) Check whether TYPE is defined. It may be a compiler builtin type or defined by the INCLUDES. INCLUDES is a series of include directives, defaulting to ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), which are used prior to the type under test. In C, TYPE must be a type-name, so that the expression ‘sizeof (TYPE)’ is valid (but ‘sizeof ((TYPE))’ is not). The same test is applied when compiling for C++, which means that in C++ TYPE should be a type-id and should not be an anonymous ‘struct’ or ‘union’. This macro caches its result in the ‘ac_cv_type_TYPE’ variable, with ‘*’ mapped to ‘p’ and other characters not suitable for a variable name mapped to underscores. 
-- Macro: AC_CHECK_TYPES (TYPES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) For each TYPE of the TYPES that is defined, define ‘HAVE_TYPE’ (in all capitals). Each TYPE must follow the rules of ‘AC_CHECK_TYPE’. If no INCLUDES are specified, the default includes are used (*note Default Includes::). If ACTION-IF-FOUND is given, it is additional shell code to execute when one of the types is found. If ACTION-IF-NOT-FOUND is given, it is executed when one of the types is not found. This macro uses M4 lists: AC_CHECK_TYPES([ptrdiff_t]) AC_CHECK_TYPES([unsigned long long int, uintmax_t]) AC_CHECK_TYPES([float_t], [], [], [[#include ]]) Autoconf, up to 2.13, used to provide to another version of ‘AC_CHECK_TYPE’, broken by design. In order to keep backward compatibility, a simple heuristic, quite safe but not totally, is implemented. In case of doubt, read the documentation of the former ‘AC_CHECK_TYPE’, see *note Obsolete Macros::.  File: autoconf.info, Node: Compilers and Preprocessors, Next: System Services, Prev: Types, Up: Existing Tests 5.10 Compilers and Preprocessors ================================ All the tests for compilers (‘AC_PROG_CC’, ‘AC_PROG_CXX’, ‘AC_PROG_F77’) define the output variable ‘EXEEXT’ based on the output of the compiler, typically to the empty string if Posix and ‘.exe’ if a DOS variant. They also define the output variable ‘OBJEXT’ based on the output of the compiler, after ‘.c’ files have been excluded, typically to ‘o’ if Posix, ‘obj’ if a DOS variant. If the compiler being used does not produce executables, the tests fail. If the executables can’t be run, and cross-compilation is not enabled, they fail too. *Note Manual Configuration::, for more on support for cross compiling. 
* Menu: * Specific Compiler Characteristics:: Some portability issues * Generic Compiler Characteristics:: Language independent tests and features * C Compiler:: Checking its characteristics * C++ Compiler:: Likewise * Objective C Compiler:: Likewise * Objective C++ Compiler:: Likewise * Erlang Compiler and Interpreter:: Likewise * Fortran Compiler:: Likewise * Go Compiler:: Likewise  File: autoconf.info, Node: Specific Compiler Characteristics, Next: Generic Compiler Characteristics, Up: Compilers and Preprocessors 5.10.1 Specific Compiler Characteristics ---------------------------------------- Some compilers exhibit different behaviors. Static/Dynamic Expressions Autoconf relies on a trick to extract one bit of information from the C compiler: using negative array sizes. For instance the following excerpt of a C source demonstrates how to test whether ‘int’ objects are 4 bytes wide: static int test_array[sizeof (int) == 4 ? 1 : -1]; To our knowledge, there is a single compiler that does not support this trick: the HP C compilers (the real ones, not only the “bundled”) on HP-UX 11.00. They incorrectly reject the above program with the diagnostic “Variable-length arrays cannot have static storage.” This bug comes from HP compilers’ mishandling of ‘sizeof (int)’, not from the ‘? 1 : -1’, and Autoconf works around this problem by casting ‘sizeof (int)’ to ‘long int’ before comparing it.  File: autoconf.info, Node: Generic Compiler Characteristics, Next: C Compiler, Prev: Specific Compiler Characteristics, Up: Compilers and Preprocessors 5.10.2 Generic Compiler Characteristics --------------------------------------- -- Macro: AC_CHECK_SIZEOF (TYPE-OR-EXPR, [UNUSED], [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) Define ‘SIZEOF_TYPE-OR-EXPR’ (*note Standard Symbols::) to be the size in bytes of TYPE-OR-EXPR, which may be either a type or an expression returning a value that has a size. If the expression ‘sizeof (TYPE-OR-EXPR)’ is invalid, the result is 0. 
INCLUDES is a series of include directives, defaulting to ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), which are used prior to the expression under test. This macro now works even when cross-compiling. The UNUSED argument was used when cross-compiling. For example, the call AC_CHECK_SIZEOF([int *]) defines ‘SIZEOF_INT_P’ to be 8 on DEC Alpha AXP systems. This macro caches its result in the ‘ac_cv_sizeof_TYPE-OR-EXPR’ variable, with ‘*’ mapped to ‘p’ and other characters not suitable for a variable name mapped to underscores. -- Macro: AC_CHECK_ALIGNOF (TYPE, [INCLUDES = ‘AC_INCLUDES_DEFAULT’]) Define ‘ALIGNOF_TYPE’ (*note Standard Symbols::) to be the alignment in bytes of TYPE. ‘TYPE y;’ must be valid as a structure member declaration. If ‘type’ is unknown, the result is 0. If no INCLUDES are specified, the default includes are used (*note Default Includes::). This macro caches its result in the ‘ac_cv_alignof_TYPE-OR-EXPR’ variable, with ‘*’ mapped to ‘p’ and other characters not suitable for a variable name mapped to underscores. -- Macro: AC_COMPUTE_INT (VAR, EXPRESSION, [INCLUDES = ‘AC_INCLUDES_DEFAULT’], [ACTION-IF-FAILS]) Store into the shell variable VAR the value of the integer EXPRESSION. The value should fit in an initializer in a C variable of type ‘signed long’. To support cross compilation (in which case, the macro only works on hosts that use twos-complement arithmetic), it should be possible to evaluate the expression at compile-time. If no INCLUDES are specified, the default includes are used (*note Default Includes::). Execute ACTION-IF-FAILS if the value cannot be determined correctly. -- Macro: AC_LANG_WERROR Normally Autoconf ignores warnings generated by the compiler, linker, and preprocessor. If this macro is used, warnings count as fatal errors for the current language. 
This macro is useful when the results of configuration are used where warnings are unacceptable; for instance, if parts of a program are built with the GCC ‘-Werror’ option. If the whole program is built using ‘-Werror’ it is often simpler to put ‘-Werror’ in the compiler flags (‘CFLAGS’, etc.). -- Macro: AC_OPENMP OpenMP (http://www.openmp.org/) specifies extensions of C, C++, and Fortran that simplify optimization of shared memory parallelism, which is a common problem on multi-core CPUs. If the current language is C, the macro ‘AC_OPENMP’ sets the variable ‘OPENMP_CFLAGS’ to the C compiler flags needed for supporting OpenMP. ‘OPENMP_CFLAGS’ is set to empty if the compiler already supports OpenMP, if it has no way to activate OpenMP support, or if the user rejects OpenMP support by invoking ‘configure’ with the ‘--disable-openmp’ option. ‘OPENMP_CFLAGS’ needs to be used when compiling programs, when preprocessing program source, and when linking programs. Therefore you need to add ‘$(OPENMP_CFLAGS)’ to the ‘CFLAGS’ of C programs that use OpenMP. If you preprocess OpenMP-specific C code, you also need to add ‘$(OPENMP_CFLAGS)’ to ‘CPPFLAGS’. The presence of OpenMP support is revealed at compile time by the preprocessor macro ‘_OPENMP’. Linking a program with ‘OPENMP_CFLAGS’ typically adds one more shared library to the program’s dependencies, so its use is recommended only on programs that actually require OpenMP. If the current language is C++, ‘AC_OPENMP’ sets the variable ‘OPENMP_CXXFLAGS’, suitably for the C++ compiler. The same remarks hold as for C. If the current language is Fortran 77 or Fortran, ‘AC_OPENMP’ sets the variable ‘OPENMP_FFLAGS’ or ‘OPENMP_FCFLAGS’, respectively. Similar remarks as for C hold, except that ‘CPPFLAGS’ is not used for Fortran, and no preprocessor macro signals OpenMP support. For portability, it is best to avoid spaces between ‘#’ and ‘pragma omp’. That is, write ‘#pragma omp’, not ‘# pragma omp’. 
The Sun WorkShop 6.2 C compiler chokes on the latter. This macro caches its result in the ‘ac_cv_prog_c_openmp’, ‘ac_cv_prog_cxx_openmp’, ‘ac_cv_prog_f77_openmp’, or ‘ac_cv_prog_fc_openmp’ variable, depending on the current language. *Caution:* Some of the compiler options that ‘AC_OPENMP’ tests, mean “enable OpenMP” to one compiler, but “write output to a file named ‘mp’ or ‘penmp’” to other compilers. We cannot guarantee that the implementation of ‘AC_OPENMP’ will not overwrite an existing file with either of these names. Therefore, as a defensive measure, a ‘configure’ script that uses ‘AC_OPENMP’ will issue an error and stop (before doing any of the operations that might overwrite these files) upon encountering either of these files in its working directory. ‘autoconf’ will also issue an error if it finds either of these files in the same directory as a ‘configure.ac’ that uses ‘AC_OPENMP’. If you have files with either of these names at the top level of your source tree, and you need to use ‘AC_OPENMP’, we recommend you either change their names or move them into a subdirectory.  File: autoconf.info, Node: C Compiler, Next: C++ Compiler, Prev: Generic Compiler Characteristics, Up: Compilers and Preprocessors 5.10.3 C Compiler Characteristics --------------------------------- The following macros provide ways to find and exercise a C Compiler. There are a few constructs that ought to be avoided, but do not deserve being checked for, since they can easily be worked around. Don’t use lines containing solitary backslashes They tickle a bug in the HP-UX C compiler (checked on HP-UX 10.20, 11.00, and 11i). When given the following source: #ifdef __STDC__ /\ * A comment with backslash-newlines in it. 
%{ %} *\ \ / char str[] = "\\ " A string with backslash-newlines in it %{ %} \\ ""; char apostrophe = '\\ \ '\ '; #endif the compiler incorrectly fails with the diagnostics “Non-terminating comment at end of file” and “Missing ‘#endif’ at end of file.” Removing the lines with solitary backslashes solves the problem. Don’t compile several files at once if output matters to you Some compilers, such as HP’s, report names of files being compiled when given more than one file operand. For instance: $ cc a.c b.c a.c: b.c: This can cause problems if you observe the output of the compiler to detect failures. Invoking ‘cc -c a.c && cc -c b.c && cc -o c a.o b.o’ solves the issue. Don’t rely on ‘#error’ failing The IRIX C compiler does not fail when #error is preprocessed; it simply emits a diagnostic and continues, exiting successfully. So, instead of an error directive like ‘#error "Unsupported word size"’ it is more portable to use an invalid directive like ‘#Unsupported word size’ in Autoconf tests. In ordinary source code, ‘#error’ is OK, since installers with inadequate compilers like IRIX can simply examine these compilers’ diagnostic output. Don’t rely on correct ‘#line’ support On Solaris, ‘c89’ (at least Sun C 5.3 through 5.8) diagnoses ‘#line’ directives whose line numbers are greater than 32767. Nothing in Posix makes this invalid. That is why Autoconf stopped issuing ‘#line’ directives. -- Macro: AC_PROG_CC ([COMPILER-SEARCH-LIST]) Determine a C compiler to use. If the environment variable ‘CC’ is set, its value will be taken as the name of the C compiler to use. Otherwise, search for a C compiler under a series of likely names, trying ‘gcc’ and ‘cc’ first. Regardless, the output variable ‘CC’ is set to the chosen compiler. If the optional first argument to the macro is used, it must be a whitespace-separated list of potential names for a C compiler, which overrides the built-in list. If no C compiler can be found, ‘configure’ will error out. 
If the selected C compiler is found to be GNU C (regardless of its name), the shell variable ‘GCC’ will be set to ‘yes’. If the shell variable ‘CFLAGS’ was not already set, it is set to ‘-g -O2’ for the GNU C compiler (‘-O2’ on systems where GCC does not accept ‘-g’), or ‘-g’ for other compilers. ‘CFLAGS’ is then made an output variable. You can override the default for ‘CFLAGS’ by inserting a shell default assignment between ‘AC_INIT’ and ‘AC_PROG_CC’: : ${CFLAGS="OPTIONS"} where OPTIONS are the appropriate set of options to use by default. (It is important to use this construct rather than a normal assignment, so that ‘CFLAGS’ can still be overridden by the person building the package. *Note Preset Output Variables::.) If necessary, options are added to ‘CC’ to enable support for ISO Standard C features with extensions, preferring the newest edition of the C standard that is supported. Currently the newest edition Autoconf knows how to detect support for is ISO C 2011. After calling this macro you can check whether the C compiler has been set to accept standard C by inspecting the shell variable ‘ac_prog_cc_stdc’. Its value will be ‘c11’, ‘c99’, or ‘c89’, respectively, if the C compiler has been set to use the 2011, 1999, or 1990 edition of the C standard, and ‘no’ if the compiler does not support compiling standard C at all. The tests for standard conformance are not comprehensive. They test the values of ‘__STDC__’ and ‘__STDC_VERSION__’, and a representative sample of the language features added in each version of the C standard. They do not test the C standard library, because the C compiler might be generating code for a “freestanding environment” (in which most of the standard library is optional). If you need to know whether a particular C standard header exists, use ‘AC_CHECK_HEADER’. None of the options that may be added to ‘CC’ by this macro enable _strict_ conformance to the C standard. In particular, system-specific extensions are not disabled. 
(For example, for GNU C, the ‘-std=gnuNN’ options may be used, but not the ‘-std=cNN’ options.) Many Autoconf macros use a compiler, and thus call ‘AC_REQUIRE([AC_PROG_CC])’ to ensure that the compiler has been determined before the body of the outermost ‘AC_DEFUN’ macro. Although ‘AC_PROG_CC’ is safe to directly expand multiple times, it performs certain checks (such as the proper value of ‘EXEEXT’) only on the first invocation. Therefore, care must be used when invoking this macro from within another macro rather than at the top level (*note Expanded Before Required::). -- Macro: AC_PROG_CC_C_O If the C compiler does not accept the ‘-c’ and ‘-o’ options simultaneously, define ‘NO_MINUS_C_MINUS_O’. This macro actually tests both the compiler found by ‘AC_PROG_CC’, and, if different, the first ‘cc’ in the path. The test fails if one fails. This macro was created for GNU Make to choose the default C compilation rule. For the compiler COMPILER, this macro caches its result in the ‘ac_cv_prog_cc_COMPILER_c_o’ variable. -- Macro: AC_PROG_CPP Set output variable ‘CPP’ to a command that runs the C preprocessor. If ‘$CC -E’ doesn’t work, tries ‘cpp’ and ‘/lib/cpp’, in that order. It is only portable to run ‘CPP’ on files with a ‘.c’ extension. Some preprocessors don’t indicate missing include files by the error status. For such preprocessors an internal variable is set that causes other macros to check the standard error from the preprocessor and consider the test failed if any warnings have been reported. For most preprocessors, though, warnings do not cause include-file tests to fail unless ‘AC_PROG_CPP_WERROR’ is also specified. -- Macro: AC_PROG_CPP_WERROR This acts like ‘AC_PROG_CPP’, except it treats warnings from the preprocessor as errors even if the preprocessor exit status indicates success. This is useful for avoiding headers that generate mandatory warnings, such as deprecation notices. 
The following macros check for C compiler or machine architecture features. To check for characteristics not listed here, use ‘AC_COMPILE_IFELSE’ (*note Running the Compiler::) or ‘AC_RUN_IFELSE’ (*note Runtime::). -- Macro: AC_C_BACKSLASH_A Define ‘HAVE_C_BACKSLASH_A’ to 1 if the C compiler understands ‘\a’. This macro is obsolescent, as current C compilers understand ‘\a’. New programs need not use this macro. -- Macro: AC_C_BIGENDIAN ([ACTION-IF-TRUE], [ACTION-IF-FALSE], [ACTION-IF-UNKNOWN], [ACTION-IF-UNIVERSAL]) If words are stored with the most significant byte first (like Motorola and SPARC CPUs), execute ACTION-IF-TRUE. If words are stored with the least significant byte first (like Intel and VAX CPUs), execute ACTION-IF-FALSE. This macro runs a test-case if endianness cannot be determined from the system header files. When cross-compiling, the test-case is not run but grep’ed for some magic values. ACTION-IF-UNKNOWN is executed if the latter case fails to determine the byte sex of the host system. In some cases a single run of a compiler can generate code for multiple architectures. This can happen, for example, when generating Mac OS X universal binary files, which work on both PowerPC and Intel architectures. In this case, the different variants might be for architectures with differing endianness. If ‘configure’ detects this, it executes ACTION-IF-UNIVERSAL instead of ACTION-IF-UNKNOWN. The default for ACTION-IF-TRUE is to define ‘WORDS_BIGENDIAN’. The default for ACTION-IF-FALSE is to do nothing. The default for ACTION-IF-UNKNOWN is to abort configure and tell the installer how to bypass this test. And finally, the default for ACTION-IF-UNIVERSAL is to ensure that ‘WORDS_BIGENDIAN’ is defined if and only if a universal build is detected and the current code is big-endian; this default works only if ‘autoheader’ is used (*note autoheader Invocation::). 
If you use this macro without specifying ACTION-IF-UNIVERSAL, you should also use ‘AC_CONFIG_HEADERS’; otherwise ‘WORDS_BIGENDIAN’ may be set incorrectly for Mac OS X universal binary files. -- Macro: AC_C_CONST If the C compiler does not fully support the ‘const’ keyword, define ‘const’ to be empty. Some C compilers that do not define ‘__STDC__’ do support ‘const’; some compilers that define ‘__STDC__’ do not completely support ‘const’. Programs can simply use ‘const’ as if every C compiler supported it; for those that don’t, the makefile or configuration header file defines it as empty. Occasionally installers use a C++ compiler to compile C code, typically because they lack a C compiler. This causes problems with ‘const’, because C and C++ treat ‘const’ differently. For example: const int foo; is valid in C but not in C++. These differences unfortunately cannot be papered over by defining ‘const’ to be empty. If ‘autoconf’ detects this situation, it leaves ‘const’ alone, as this generally yields better results in practice. However, using a C++ compiler to compile C code is not recommended or supported, and installers who run into trouble in this area should get a C compiler like GCC to compile their C code. This macro caches its result in the ‘ac_cv_c_const’ variable. This macro is obsolescent, as current C compilers support ‘const’. New programs need not use this macro. -- Macro: AC_C__GENERIC If the C compiler supports C11-style generic selection using the ‘_Generic’ keyword, define ‘HAVE_C__GENERIC’. -- Macro: AC_C_RESTRICT If the C compiler recognizes a variant spelling for the ‘restrict’ keyword (‘__restrict’, ‘__restrict__’, or ‘_Restrict’), then define ‘restrict’ to that; this is more likely to do the right thing with compilers that support language variants where plain ‘restrict’ is not a keyword. Otherwise, if the C compiler recognizes the ‘restrict’ keyword, don’t do anything. Otherwise, define ‘restrict’ to be empty. 
Thus, programs may simply use ‘restrict’ as if every C compiler supported it; for those that do not, the makefile or configuration header defines it away. Although support in C++ for the ‘restrict’ keyword is not required, several C++ compilers do accept the keyword. This macro works for them, too. This macro caches ‘no’ in the ‘ac_cv_c_restrict’ variable if ‘restrict’ is not supported, and a supported spelling otherwise. -- Macro: AC_C_VOLATILE If the C compiler does not understand the keyword ‘volatile’, define ‘volatile’ to be empty. Programs can simply use ‘volatile’ as if every C compiler supported it; for those that do not, the makefile or configuration header defines it as empty. If the correctness of your program depends on the semantics of ‘volatile’, simply defining it to be empty does, in a sense, break your code. However, given that the compiler does not support ‘volatile’, you are at its mercy anyway. At least your program compiles, when it wouldn’t before. *Note Volatile Objects::, for more about ‘volatile’. In general, the ‘volatile’ keyword is a standard C feature, so you might expect that ‘volatile’ is available only when ‘__STDC__’ is defined. However, Ultrix 4.3’s native compiler does support volatile, but does not define ‘__STDC__’. This macro is obsolescent, as current C compilers support ‘volatile’. New programs need not use this macro. -- Macro: AC_C_INLINE If the C compiler supports the keyword ‘inline’, do nothing. Otherwise define ‘inline’ to ‘__inline__’ or ‘__inline’ if it accepts one of those, otherwise define ‘inline’ to be empty. -- Macro: AC_C_CHAR_UNSIGNED If the C type ‘char’ is unsigned, define ‘__CHAR_UNSIGNED__’, unless the C compiler predefines it. These days, using this macro is not necessary. The same information can be determined by this portable alternative, thus avoiding the use of preprocessor macros in the namespace reserved for the implementation. 
#include <limits.h> #if CHAR_MIN == 0 # define CHAR_UNSIGNED 1 #endif -- Macro: AC_C_STRINGIZE If the C preprocessor supports the stringizing operator, define ‘HAVE_STRINGIZE’. The stringizing operator is ‘#’ and is found in macros such as this: #define x(y) #y This macro is obsolescent, as current C compilers support the stringizing operator. New programs need not use this macro. -- Macro: AC_C_FLEXIBLE_ARRAY_MEMBER If the C compiler supports flexible array members, define ‘FLEXIBLE_ARRAY_MEMBER’ to nothing; otherwise define it to 1. That way, a declaration like this: struct s { size_t n_vals; double val[FLEXIBLE_ARRAY_MEMBER]; }; will let applications use the “struct hack” even with compilers that do not support flexible array members. To allocate and use such an object, you can use code like this: size_t i; size_t n = compute_value_count (); struct s *p = malloc (offsetof (struct s, val) + n * sizeof (double)); p->n_vals = n; for (i = 0; i < n; i++) p->val[i] = compute_value (i); -- Macro: AC_C_VARARRAYS If the C compiler does not support variable-length arrays, define the macro ‘__STDC_NO_VLA__’ to be 1 if it is not already defined. A variable-length array is an array of automatic storage duration whose length is determined at run time, when the array is declared. For backward compatibility this macro also defines ‘HAVE_C_VARARRAYS’ if the C compiler supports variable-length arrays, but this usage is obsolescent and new programs should use ‘__STDC_NO_VLA__’. -- Macro: AC_C_TYPEOF If the C compiler supports GNU C’s ‘typeof’ syntax either directly or through a different spelling of the keyword (e.g., ‘__typeof__’), define ‘HAVE_TYPEOF’. If the support is available only through a different spelling, define ‘typeof’ to that spelling. -- Macro: AC_C_PROTOTYPES If function prototypes are understood by the compiler (as determined by ‘AC_PROG_CC’), define ‘PROTOTYPES’ and ‘__PROTOTYPES’. 
Defining ‘__PROTOTYPES’ is for the benefit of header files that cannot use macros that infringe on user name space. This macro is obsolescent, as current C compilers support prototypes. New programs need not use this macro. -- Macro: AC_PROG_GCC_TRADITIONAL Add ‘-traditional’ to output variable ‘CC’ if using a GNU C compiler and ‘ioctl’ does not work properly without ‘-traditional’. That usually happens when the fixed header files have not been installed on an old system. This macro is obsolescent, since current versions of the GNU C compiler fix the header files automatically when installed.  File: autoconf.info, Node: C++ Compiler, Next: Objective C Compiler, Prev: C Compiler, Up: Compilers and Preprocessors 5.10.4 C++ Compiler Characteristics ----------------------------------- -- Macro: AC_PROG_CXX ([COMPILER-SEARCH-LIST]) Determine a C++ compiler to use. If either the environment variable ‘CXX’ or the environment variable ‘CCC’ is set, its value will be taken as the name of a C++ compiler. If both are set, ‘CXX’ is preferred. If neither are set, search for a C++ compiler under a series of likely names, trying ‘g++’ and ‘c++’ first. Regardless, the output variable ‘CXX’ is set to the chosen compiler. If the optional first argument to the macro is used, it must be a whitespace-separated list of potential names for a C++ compiler, which overrides the built-in list. If no C++ compiler can be found, as a last resort ‘CXX’ is set to ‘g++’ (and subsequent tests will probably fail). If the selected C++ compiler is found to be GNU C++ (regardless of its name), the shell variable ‘GXX’ will be set to ‘yes’. If the shell variable ‘CXXFLAGS’ was not already set, it is set to ‘-g -O2’ for the GNU C++ compiler (‘-O2’ on systems where G++ does not accept ‘-g’), or ‘-g’ for other compilers. ‘CXXFLAGS’ is then made an output variable. 
You can override the default for ‘CXXFLAGS’ by inserting a shell default assignment between ‘AC_INIT’ and ‘AC_PROG_CXX’: : ${CXXFLAGS="OPTIONS"} where OPTIONS are the appropriate set of options to use by default. (It is important to use this construct rather than a normal assignment, so that ‘CXXFLAGS’ can still be overridden by the person building the package. *Note Preset Output Variables::.) If necessary, options are added to ‘CXX’ to enable support for ISO Standard C++ features with extensions, preferring the newest edition of the C++ standard that is supported. Currently the newest edition Autoconf knows how to detect support for is ISO C++ 2011. After calling this macro, you can check whether the C++ compiler has been set to accept standard C++ by inspecting the shell variable ‘ac_prog_cxx_stdc’. Its value will be ‘cxx11’ or ‘cxx98’, respectively, if the C++ compiler has been set to use the 2011 or 1998 edition of the C++ standard, and ‘no’ if the compiler does not support compiling standard C++ at all. The tests for standard conformance are not comprehensive. They test the value of ‘__cplusplus’ and a representative sample of the language features added in each version of the C++ standard. They do not test the C++ standard library, because this can be extremely slow, and because the C++ compiler might be generating code for a “freestanding environment” (in which most of the C++ standard library is optional). If you need to know whether a particular C++ standard header exists, use ‘AC_CHECK_HEADER’. None of the options that may be added to ‘CXX’ by this macro enable _strict_ conformance to the C++ standard. In particular, system-specific extensions are not disabled. (For example, for GNU C++, the ‘-std=gnu++NN’ options may be used, but not the ‘-std=c++NN’ options.) -- Macro: AC_PROG_CXXCPP Set output variable ‘CXXCPP’ to a command that runs the C++ preprocessor. If ‘$CXX -E’ doesn’t work, tries ‘cpp’ and ‘/lib/cpp’, in that order. 
Because of this fallback, ‘CXXCPP’ may or may not set C++-specific predefined macros (such as ‘__cplusplus’). It is portable to run ‘CXXCPP’ only on files with a ‘.c’, ‘.C’, ‘.cc’, or ‘.cpp’ extension. Some preprocessors don’t indicate missing include files by the error status. For such preprocessors an internal variable is set that causes other macros to check the standard error from the preprocessor and consider the test failed if any warnings have been reported. However, it is not known whether such broken preprocessors exist for C++. -- Macro: AC_PROG_CXX_C_O Test whether the C++ compiler accepts the options ‘-c’ and ‘-o’ simultaneously, and define ‘CXX_NO_MINUS_C_MINUS_O’, if it does not.  File: autoconf.info, Node: Objective C Compiler, Next: Objective C++ Compiler, Prev: C++ Compiler, Up: Compilers and Preprocessors 5.10.5 Objective C Compiler Characteristics ------------------------------------------- -- Macro: AC_PROG_OBJC ([COMPILER-SEARCH-LIST]) Determine an Objective C compiler to use. If ‘OBJC’ is not already set in the environment, check for Objective C compilers. Set output variable ‘OBJC’ to the name of the compiler found. This macro may, however, be invoked with an optional first argument which, if specified, must be a blank-separated list of Objective C compilers to search for. This just gives the user an opportunity to specify an alternative search list for the Objective C compiler. For example, if you didn’t like the default order, then you could invoke ‘AC_PROG_OBJC’ like this: AC_PROG_OBJC([gcc objcc objc]) If using a compiler that supports GNU Objective C, set shell variable ‘GOBJC’ to ‘yes’. If output variable ‘OBJCFLAGS’ was not already set, set it to ‘-g -O2’ for a GNU Objective C compiler (‘-O2’ on systems where the compiler does not accept ‘-g’), or ‘-g’ for other compilers. -- Macro: AC_PROG_OBJCPP Set output variable ‘OBJCPP’ to a command that runs the Objective C preprocessor. 
If ‘$OBJC -E’ doesn’t work, tries ‘cpp’ and ‘/lib/cpp’, in that order. Because of this fallback, ‘OBJCPP’ may or may not set Objective-C-specific predefined macros (such as ‘__OBJC__’).  File: autoconf.info, Node: Objective C++ Compiler, Next: Erlang Compiler and Interpreter, Prev: Objective C Compiler, Up: Compilers and Preprocessors 5.10.6 Objective C++ Compiler Characteristics --------------------------------------------- -- Macro: AC_PROG_OBJCXX ([COMPILER-SEARCH-LIST]) Determine an Objective C++ compiler to use. If ‘OBJCXX’ is not already set in the environment, check for Objective C++ compilers. Set output variable ‘OBJCXX’ to the name of the compiler found. This macro may, however, be invoked with an optional first argument which, if specified, must be a blank-separated list of Objective C++ compilers to search for. This just gives the user an opportunity to specify an alternative search list for the Objective C++ compiler. For example, if you didn’t like the default order, then you could invoke ‘AC_PROG_OBJCXX’ like this: AC_PROG_OBJCXX([gcc g++ objcc++ objcxx]) If using a compiler that supports GNU Objective C++, set shell variable ‘GOBJCXX’ to ‘yes’. If output variable ‘OBJCXXFLAGS’ was not already set, set it to ‘-g -O2’ for a GNU Objective C++ compiler (‘-O2’ on systems where the compiler does not accept ‘-g’), or ‘-g’ for other compilers. -- Macro: AC_PROG_OBJCXXCPP Set output variable ‘OBJCXXCPP’ to a command that runs the Objective C++ preprocessor. If ‘$OBJCXX -E’ doesn’t work, tries ‘cpp’ and ‘/lib/cpp’, in that order. Because of this fallback, ‘OBJCXXCPP’ may or may not set Objective-C++-specific predefined macros (such as ‘__cplusplus’ and ‘__OBJC__’).  
File: autoconf.info, Node: Erlang Compiler and Interpreter, Next: Fortran Compiler, Prev: Objective C++ Compiler, Up: Compilers and Preprocessors 5.10.7 Erlang Compiler and Interpreter Characteristics ------------------------------------------------------ Autoconf defines the following macros for determining paths to the essential Erlang/OTP programs: -- Macro: AC_ERLANG_PATH_ERLC ([VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Determine an Erlang compiler to use. If ‘ERLC’ is not already set in the environment, check for ‘erlc’. Set output variable ‘ERLC’ to the complete path of the compiler command found. In addition, if ‘ERLCFLAGS’ is not set in the environment, set it to an empty value. The two optional arguments have the same meaning as the two last arguments of macro ‘AC_PATH_PROG’ for looking for the ‘erlc’ program. For example, to look for ‘erlc’ only in the ‘/usr/lib/erlang/bin’ directory: AC_ERLANG_PATH_ERLC([not found], [/usr/lib/erlang/bin]) -- Macro: AC_ERLANG_NEED_ERLC ([PATH = ‘$PATH’]) A simplified variant of the ‘AC_ERLANG_PATH_ERLC’ macro, that prints an error message and exits the ‘configure’ script if the ‘erlc’ program is not found. -- Macro: AC_ERLANG_PATH_ERL ([VALUE-IF-NOT-FOUND], [PATH = ‘$PATH’]) Determine an Erlang interpreter to use. If ‘ERL’ is not already set in the environment, check for ‘erl’. Set output variable ‘ERL’ to the complete path of the interpreter command found. The two optional arguments have the same meaning as the two last arguments of macro ‘AC_PATH_PROG’ for looking for the ‘erl’ program. For example, to look for ‘erl’ only in the ‘/usr/lib/erlang/bin’ directory: AC_ERLANG_PATH_ERL([not found], [/usr/lib/erlang/bin]) -- Macro: AC_ERLANG_NEED_ERL ([PATH = ‘$PATH’]) A simplified variant of the ‘AC_ERLANG_PATH_ERL’ macro, that prints an error message and exits the ‘configure’ script if the ‘erl’ program is not found.  
File: autoconf.info, Node: Fortran Compiler, Next: Go Compiler, Prev: Erlang Compiler and Interpreter, Up: Compilers and Preprocessors 5.10.8 Fortran Compiler Characteristics --------------------------------------- The Autoconf Fortran support is divided into two categories: legacy Fortran 77 macros (‘F77’), and modern Fortran macros (‘FC’). The former are intended for traditional Fortran 77 code, and have output variables like ‘F77’, ‘FFLAGS’, and ‘FLIBS’. The latter are for newer programs that can (or must) compile under the newer Fortran standards, and have output variables like ‘FC’, ‘FCFLAGS’, and ‘FCLIBS’. Except for the macros ‘AC_FC_SRCEXT’, ‘AC_FC_FREEFORM’, ‘AC_FC_FIXEDFORM’, and ‘AC_FC_LINE_LENGTH’ (see below), the ‘FC’ and ‘F77’ macros behave almost identically, and so they are documented together in this section. -- Macro: AC_PROG_F77 ([COMPILER-SEARCH-LIST]) Determine a Fortran 77 compiler to use. If ‘F77’ is not already set in the environment, then check for ‘g77’ and ‘f77’, and then some other names. Set the output variable ‘F77’ to the name of the compiler found. This macro may, however, be invoked with an optional first argument which, if specified, must be a blank-separated list of Fortran 77 compilers to search for. This just gives the user an opportunity to specify an alternative search list for the Fortran 77 compiler. For example, if you didn’t like the default order, then you could invoke ‘AC_PROG_F77’ like this: AC_PROG_F77([fl32 f77 fort77 xlf g77 f90 xlf90]) If using a compiler that supports GNU Fortran 77, set the shell variable ‘G77’ to ‘yes’. If the output variable ‘FFLAGS’ was not already set in the environment, set it to ‘-g -O2’ for ‘g77’ (or ‘-O2’ where the GNU Fortran 77 compiler does not accept ‘-g’), or ‘-g’ for other compilers. The result of the GNU test is cached in the ‘ac_cv_f77_compiler_gnu’ variable, acceptance of ‘-g’ in the ‘ac_cv_prog_f77_g’ variable. 
-- Macro: AC_PROG_FC ([COMPILER-SEARCH-LIST], [DIALECT]) Determine a Fortran compiler to use. If ‘FC’ is not already set in the environment, then ‘dialect’ is a hint to indicate what Fortran dialect to search for; the default is to search for the newest available dialect. Set the output variable ‘FC’ to the name of the compiler found. By default, newer dialects are preferred over older dialects, but if ‘dialect’ is specified then older dialects are preferred starting with the specified dialect. ‘dialect’ can currently be one of Fortran 77, Fortran 90, or Fortran 95. However, this is only a hint of which compiler _name_ to prefer (e.g., ‘f90’ or ‘f95’), and no attempt is made to guarantee that a particular language standard is actually supported. Thus, it is preferable that you avoid the ‘dialect’ option, and use AC_PROG_FC only for code compatible with the latest Fortran standard. This macro may, alternatively, be invoked with an optional first argument which, if specified, must be a blank-separated list of Fortran compilers to search for, just as in ‘AC_PROG_F77’. If using a compiler that supports GNU Fortran, set the shell variable ‘GFC’ to ‘yes’. If the output variable ‘FCFLAGS’ was not already set in the environment, then set it to ‘-g -O2’ for a GNU Fortran compiler (or ‘-O2’ where the compiler does not accept ‘-g’), or ‘-g’ for other compilers. The result of the GNU test is cached in the ‘ac_cv_fc_compiler_gnu’ variable, acceptance of ‘-g’ in the ‘ac_cv_prog_fc_g’ variable. -- Macro: AC_PROG_F77_C_O -- Macro: AC_PROG_FC_C_O Test whether the Fortran compiler accepts the options ‘-c’ and ‘-o’ simultaneously, and define ‘F77_NO_MINUS_C_MINUS_O’ or ‘FC_NO_MINUS_C_MINUS_O’, respectively, if it does not. The result of the test is cached in the ‘ac_cv_prog_f77_c_o’ or ‘ac_cv_prog_fc_c_o’ variable, respectively. The following macros check for Fortran compiler characteristics. 
To check for characteristics not listed here, use ‘AC_COMPILE_IFELSE’ (*note Running the Compiler::) or ‘AC_RUN_IFELSE’ (*note Runtime::), making sure to first set the current language to Fortran 77 or Fortran via ‘AC_LANG([Fortran 77])’ or ‘AC_LANG(Fortran)’ (*note Language Choice::). -- Macro: AC_F77_LIBRARY_LDFLAGS -- Macro: AC_FC_LIBRARY_LDFLAGS Determine the linker flags (e.g., ‘-L’ and ‘-l’) for the “Fortran intrinsic and runtime libraries” that are required to successfully link a Fortran program or shared library. The output variable ‘FLIBS’ or ‘FCLIBS’ is set to these flags (which should be included after ‘LIBS’ when linking). This macro is intended to be used in those situations when it is necessary to mix, e.g., C++ and Fortran source code in a single program or shared library (*note (automake)Mixing Fortran 77 With C and C++::). For example, if object files from a C++ and Fortran compiler must be linked together, then the C++ compiler/linker must be used for linking (since special C++-ish things need to happen at link time like calling global constructors, instantiating templates, enabling exception support, etc.). However, the Fortran intrinsic and runtime libraries must be linked in as well, but the C++ compiler/linker doesn’t know by default how to add these Fortran 77 libraries. Hence, this macro was created to determine these Fortran libraries. The macros ‘AC_F77_DUMMY_MAIN’ and ‘AC_FC_DUMMY_MAIN’ or ‘AC_F77_MAIN’ and ‘AC_FC_MAIN’ are probably also necessary to link C/C++ with Fortran; see below. Further, it is highly recommended that you use ‘AC_CONFIG_HEADERS’ (*note Configuration Headers::) because the complex defines that the function wrapper macros create may not work with C/C++ compiler drivers. These macros internally compute the flag needed to verbose linking output and cache it in ‘ac_cv_prog_f77_v’ or ‘ac_cv_prog_fc_v’ variables, respectively. The computed linker flags are cached in ‘ac_cv_f77_libs’ or ‘ac_cv_fc_libs’, respectively. 
-- Macro: AC_F77_DUMMY_MAIN ([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND = ‘AC_MSG_FAILURE’]) -- Macro: AC_FC_DUMMY_MAIN ([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND = ‘AC_MSG_FAILURE’]) With many compilers, the Fortran libraries detected by ‘AC_F77_LIBRARY_LDFLAGS’ or ‘AC_FC_LIBRARY_LDFLAGS’ provide their own ‘main’ entry function that initializes things like Fortran I/O, and which then calls a user-provided entry function named (say) ‘MAIN__’ to run the user’s program. The ‘AC_F77_DUMMY_MAIN’ and ‘AC_FC_DUMMY_MAIN’ or ‘AC_F77_MAIN’ and ‘AC_FC_MAIN’ macros figure out how to deal with this interaction. When using Fortran for purely numerical functions (no I/O, etc.) often one prefers to provide one’s own ‘main’ and skip the Fortran library initializations. In this case, however, one may still need to provide a dummy ‘MAIN__’ routine in order to prevent linking errors on some systems. ‘AC_F77_DUMMY_MAIN’ or ‘AC_FC_DUMMY_MAIN’ detects whether any such routine is _required_ for linking, and what its name is; the shell variable ‘F77_DUMMY_MAIN’ or ‘FC_DUMMY_MAIN’ holds this name, ‘unknown’ when no solution was found, and ‘none’ when no such dummy main is needed. By default, ACTION-IF-FOUND defines ‘F77_DUMMY_MAIN’ or ‘FC_DUMMY_MAIN’ to the name of this routine (e.g., ‘MAIN__’) _if_ it is required. ACTION-IF-NOT-FOUND defaults to exiting with an error. In order to link with Fortran routines, the user’s C/C++ program should then include the following code to define the dummy main if it is needed: #ifdef F77_DUMMY_MAIN # ifdef __cplusplus extern "C" # endif int F77_DUMMY_MAIN () { return 1; } #endif (Replace ‘F77’ with ‘FC’ for Fortran instead of Fortran 77.) Note that this macro is called automatically from ‘AC_F77_WRAPPERS’ or ‘AC_FC_WRAPPERS’; there is generally no need to call it explicitly unless one wants to change the default actions. The result of this macro is cached in the ‘ac_cv_f77_dummy_main’ or ‘ac_cv_fc_dummy_main’ variable, respectively. 
-- Macro: AC_F77_MAIN -- Macro: AC_FC_MAIN As discussed above, many Fortran libraries allow you to provide an entry point called (say) ‘MAIN__’ instead of the usual ‘main’, which is then called by a ‘main’ function in the Fortran libraries that initializes things like Fortran I/O. The ‘AC_F77_MAIN’ and ‘AC_FC_MAIN’ macros detect whether it is _possible_ to utilize such an alternate main function, and defines ‘F77_MAIN’ and ‘FC_MAIN’ to the name of the function. (If no alternate main function name is found, ‘F77_MAIN’ and ‘FC_MAIN’ are simply defined to ‘main’.) Thus, when calling Fortran routines from C that perform things like I/O, one should use this macro and declare the "main" function like so: #ifdef __cplusplus extern "C" #endif int F77_MAIN (int argc, char *argv[]); (Again, replace ‘F77’ with ‘FC’ for Fortran instead of Fortran 77.) The result of this macro is cached in the ‘ac_cv_f77_main’ or ‘ac_cv_fc_main’ variable, respectively. -- Macro: AC_F77_WRAPPERS -- Macro: AC_FC_WRAPPERS Defines C macros ‘F77_FUNC (name, NAME)’, ‘FC_FUNC (name, NAME)’, ‘F77_FUNC_(name, NAME)’, and ‘FC_FUNC_(name, NAME)’ to properly mangle the names of C/C++ identifiers, and identifiers with underscores, respectively, so that they match the name-mangling scheme used by the Fortran compiler. Fortran is case-insensitive, and in order to achieve this the Fortran compiler converts all identifiers into a canonical case and format. To call a Fortran subroutine from C or to write a C function that is callable from Fortran, the C program must explicitly use identifiers in the format expected by the Fortran compiler. In order to do this, one simply wraps all C identifiers in one of the macros provided by ‘AC_F77_WRAPPERS’ or ‘AC_FC_WRAPPERS’. 
For example, suppose you have the following Fortran 77 subroutine: subroutine foobar (x, y) double precision x, y y = 3.14159 * x return end You would then declare its prototype in C or C++ as: #define FOOBAR_F77 F77_FUNC (foobar, FOOBAR) #ifdef __cplusplus extern "C" /* prevent C++ name mangling */ #endif void FOOBAR_F77 (double *x, double *y); Note that we pass both the lowercase and uppercase versions of the function name to ‘F77_FUNC’ so that it can select the right one. Note also that all parameters to Fortran 77 routines are passed as pointers (*note (automake)Mixing Fortran 77 With C and C++::). (Replace ‘F77’ with ‘FC’ for Fortran instead of Fortran 77.) Although Autoconf tries to be intelligent about detecting the name-mangling scheme of the Fortran compiler, there may be Fortran compilers that it doesn’t support yet. In this case, the above code generates a compile-time error, but some other behavior (e.g., disabling Fortran-related features) can be induced by checking whether ‘F77_FUNC’ or ‘FC_FUNC’ is defined. Now, to call that routine from a C program, we would do something like: { double x = 2.7183, y; FOOBAR_F77 (&x, &y); } If the Fortran identifier contains an underscore (e.g., ‘foo_bar’), you should use ‘F77_FUNC_’ or ‘FC_FUNC_’ instead of ‘F77_FUNC’ or ‘FC_FUNC’ (with the same arguments). This is because some Fortran compilers mangle names differently if they contain an underscore. The name mangling scheme is encoded in the ‘ac_cv_f77_mangling’ or ‘ac_cv_fc_mangling’ cache variable, respectively, and also used for the ‘AC_F77_FUNC’ and ‘AC_FC_FUNC’ macros described below. -- Macro: AC_F77_FUNC (NAME, [SHELLVAR]) -- Macro: AC_FC_FUNC (NAME, [SHELLVAR]) Given an identifier NAME, set the shell variable SHELLVAR to hold the mangled version NAME according to the rules of the Fortran linker (see also ‘AC_F77_WRAPPERS’ or ‘AC_FC_WRAPPERS’). SHELLVAR is optional; if it is not supplied, the shell variable is simply NAME. 
The purpose of this macro is to give the caller a way to access the name-mangling information other than through the C preprocessor as above, for example, to call Fortran routines from some language other than C/C++. -- Macro: AC_FC_SRCEXT (EXT, [ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) -- Macro: AC_FC_PP_SRCEXT (EXT, [ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) By default, the ‘FC’ macros perform their tests using a ‘.f’ extension for source-code files. Some compilers, however, only enable newer language features for appropriately named files, e.g., Fortran 90 features only for ‘.f90’ files, or preprocessing only with ‘.F’ files or maybe other upper-case extensions. On the other hand, some other compilers expect all source files to end in ‘.f’ and require special flags to support other file name extensions. The ‘AC_FC_SRCEXT’ and ‘AC_FC_PP_SRCEXT’ macros deal with these issues. The ‘AC_FC_SRCEXT’ macro tries to get the ‘FC’ compiler to accept files ending with the extension ‘.EXT’ (i.e., EXT does _not_ contain the dot). If any special compiler flags are needed for this, it stores them in the output variable ‘FCFLAGS_EXT’. This extension and these flags are then used for all subsequent ‘FC’ tests (until ‘AC_FC_SRCEXT’ or ‘AC_FC_PP_SRCEXT’ is called another time). For example, you would use ‘AC_FC_SRCEXT(f90)’ to employ the ‘.f90’ extension in future tests, and it would set the ‘FCFLAGS_f90’ output variable with any extra flags that are needed to compile such files. Similarly, the ‘AC_FC_PP_SRCEXT’ macro tries to get the ‘FC’ compiler to preprocess and compile files with the extension ‘.EXT’. When both ‘fpp’ and ‘cpp’ style preprocessing are provided, the former is preferred, as the latter may treat continuation lines, ‘//’ tokens, and white space differently from what some Fortran dialects expect. Conversely, if you do not want files to be preprocessed, use only lower-case characters in the file name extension. 
Like with ‘AC_FC_SRCEXT(f90)’, any needed flags are stored in the ‘FCFLAGS_EXT’ variable. The ‘FCFLAGS_EXT’ flags can _not_ be simply absorbed into ‘FCFLAGS’, for two reasons based on the limitations of some compilers. First, only one ‘FCFLAGS_EXT’ can be used at a time, so files with different extensions must be compiled separately. Second, ‘FCFLAGS_EXT’ must appear _immediately_ before the source-code file name when compiling. So, continuing the example above, you might compile a ‘foo.f90’ file in your makefile with the command: foo.o: foo.f90 $(FC) -c $(FCFLAGS) $(FCFLAGS_f90) '$(srcdir)/foo.f90' If ‘AC_FC_SRCEXT’ or ‘AC_FC_PP_SRCEXT’ succeeds in compiling files with the EXT extension, it calls ACTION-IF-SUCCESS (defaults to nothing). If it fails, and cannot find a way to make the ‘FC’ compiler accept such files, it calls ACTION-IF-FAILURE (defaults to exiting with an error message). The ‘AC_FC_SRCEXT’ and ‘AC_FC_PP_SRCEXT’ macros cache their results in ‘ac_cv_fc_srcext_EXT’ and ‘ac_cv_fc_pp_srcext_EXT’ variables, respectively. -- Macro: AC_FC_PP_DEFINE ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Find a flag to specify defines for preprocessed Fortran. Not all Fortran compilers use ‘-D’. Substitute ‘FC_DEFINE’ with the result and call ACTION-IF-SUCCESS (defaults to nothing) if successful, and ACTION-IF-FAILURE (defaults to failing with an error message) if not. This macro calls ‘AC_FC_PP_SRCEXT([F])’ in order to learn how to preprocess a ‘conftest.F’ file, but restores a previously used Fortran source file extension afterwards again. The result of this test is cached in the ‘ac_cv_fc_pp_define’ variable. -- Macro: AC_FC_FREEFORM ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Try to ensure that the Fortran compiler (‘$FC’) allows free-format source code (as opposed to the older fixed-format style from Fortran 77). If necessary, it may add some additional flags to ‘FCFLAGS’. 
This macro is most important if you are using the default ‘.f’ extension, since many compilers interpret this extension as indicating fixed-format source unless an additional flag is supplied. If you specify a different extension with ‘AC_FC_SRCEXT’, such as ‘.f90’, then ‘AC_FC_FREEFORM’ ordinarily succeeds without modifying ‘FCFLAGS’. For extensions which the compiler does not know about, the flag set by the ‘AC_FC_SRCEXT’ macro might let the compiler assume Fortran 77 by default, however. If ‘AC_FC_FREEFORM’ succeeds in compiling free-form source, it calls ACTION-IF-SUCCESS (defaults to nothing). If it fails, it calls ACTION-IF-FAILURE (defaults to exiting with an error message). The result of this test, or ‘none’ or ‘unknown’, is cached in the ‘ac_cv_fc_freeform’ variable. -- Macro: AC_FC_FIXEDFORM ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Try to ensure that the Fortran compiler (‘$FC’) allows the old fixed-format source code (as opposed to free-format style). If necessary, it may add some additional flags to ‘FCFLAGS’. This macro is needed for some compiler alias names like ‘xlf95’ which assume free-form source code by default, and in case you want to use fixed-form source with an extension like ‘.f90’ which many compilers interpret as free-form by default. If you specify a different extension with ‘AC_FC_SRCEXT’, such as ‘.f’, then ‘AC_FC_FIXEDFORM’ ordinarily succeeds without modifying ‘FCFLAGS’. If ‘AC_FC_FIXEDFORM’ succeeds in compiling fixed-form source, it calls ACTION-IF-SUCCESS (defaults to nothing). If it fails, it calls ACTION-IF-FAILURE (defaults to exiting with an error message). The result of this test, or ‘none’ or ‘unknown’, is cached in the ‘ac_cv_fc_fixedform’ variable. -- Macro: AC_FC_LINE_LENGTH ([LENGTH], [ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Try to ensure that the Fortran compiler (‘$FC’) accepts long source code lines. 
The LENGTH argument may be given as 80, 132, or unlimited, and defaults to 132. Note that line lengths above 250 columns are not portable, and some compilers do not accept more than 132 columns at least for fixed format source. If necessary, it may add some additional flags to ‘FCFLAGS’. If ‘AC_FC_LINE_LENGTH’ succeeds in compiling fixed-form source, it calls ACTION-IF-SUCCESS (defaults to nothing). If it fails, it calls ACTION-IF-FAILURE (defaults to exiting with an error message). The result of this test, or ‘none’ or ‘unknown’, is cached in the ‘ac_cv_fc_line_length’ variable. -- Macro: AC_FC_CHECK_BOUNDS ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) The ‘AC_FC_CHECK_BOUNDS’ macro tries to enable array bounds checking in the Fortran compiler. If successful, the ACTION-IF-SUCCESS is called and any needed flags are added to ‘FCFLAGS’. Otherwise, ACTION-IF-FAILURE is called, which defaults to failing with an error message. The macro currently requires Fortran 90 or a newer dialect. The result of the macro is cached in the ‘ac_cv_fc_check_bounds’ variable. -- Macro: AC_F77_IMPLICIT_NONE ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) -- Macro: AC_FC_IMPLICIT_NONE ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Try to disallow implicit declarations in the Fortran compiler. If successful, ACTION-IF-SUCCESS is called and any needed flags are added to ‘FFLAGS’ or ‘FCFLAGS’, respectively. Otherwise, ACTION-IF-FAILURE is called, which defaults to failing with an error message. The result of these macros are cached in the ‘ac_cv_f77_implicit_none’ and ‘ac_cv_fc_implicit_none’ variables, respectively. -- Macro: AC_FC_MODULE_EXTENSION Find the Fortran 90 module file name extension. Most Fortran 90 compilers store module information in files separate from the object files. 
The module files are usually named after the name of the module rather than the source file name, with characters possibly turned to upper case, plus an extension, often ‘.mod’. Not all compilers use module files at all, or by default. The Cray Fortran compiler requires ‘-e m’ in order to store and search module information in ‘.mod’ files rather than in object files. Likewise, the Fujitsu Fortran compilers use the ‘-Am’ option to indicate how module information is stored. The ‘AC_FC_MODULE_EXTENSION’ macro computes the module extension without the leading dot, and stores that in the ‘FC_MODEXT’ variable. If the compiler does not produce module files, or the extension cannot be determined, ‘FC_MODEXT’ is empty. Typically, the result of this macro may be used in cleanup ‘make’ rules as follows: clean-modules: -test -z "$(FC_MODEXT)" || rm -f *.$(FC_MODEXT) The extension, or ‘unknown’, is cached in the ‘ac_cv_fc_module_ext’ variable. -- Macro: AC_FC_MODULE_FLAG ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Find the compiler flag to include Fortran 90 module information from another directory, and store that in the ‘FC_MODINC’ variable. Call ACTION-IF-SUCCESS (defaults to nothing) if successful, and set ‘FC_MODINC’ to empty and call ACTION-IF-FAILURE (defaults to exiting with an error message) if not. Most Fortran 90 compilers provide a way to specify module directories. Some have separate flags for the directory to write module files to, and directories to search them in, whereas others only allow writing to the current directory or to the first directory specified in the include path. Further, with some compilers, the module search path and the preprocessor search path can only be modified with the same flag. Thus, for portability, write module files to the current directory only and list that as first directory in the search path. 
There may be no whitespace between ‘FC_MODINC’ and the following directory name, but ‘FC_MODINC’ may contain trailing white space. For example, if you use Automake and would like to search ‘../lib’ for module files, you can use the following: AM_FCFLAGS = $(FC_MODINC). $(FC_MODINC)../lib Inside ‘configure’ tests, you can use: if test -n "$FC_MODINC"; then FCFLAGS="$FCFLAGS $FC_MODINC. $FC_MODINC../lib" fi The flag is cached in the ‘ac_cv_fc_module_flag’ variable. The substituted value of ‘FC_MODINC’ may refer to the ‘ac_empty’ dummy placeholder empty variable, to avoid losing the significant trailing whitespace in a ‘Makefile’. -- Macro: AC_FC_MODULE_OUTPUT_FLAG ([ACTION-IF-SUCCESS], [ACTION-IF-FAILURE = ‘AC_MSG_FAILURE’]) Find the compiler flag to write Fortran 90 module information to another directory, and store that in the ‘FC_MODOUT’ variable. Call ACTION-IF-SUCCESS (defaults to nothing) if successful, and set ‘FC_MODOUT’ to empty and call ACTION-IF-FAILURE (defaults to exiting with an error message) if not. Not all Fortran 90 compilers write module files, and of those that do, not all allow writing to a directory other than the current one, nor do all have separate flags for writing and reading; see the description of ‘AC_FC_MODULE_FLAG’ above. If you need to be able to write to another directory, for maximum portability use ‘FC_MODOUT’ before any ‘FC_MODINC’ and include both the current directory and the one you write to in the search path: AM_FCFLAGS = $(FC_MODOUT)../mod $(FC_MODINC)../mod $(FC_MODINC). ... The flag is cached in the ‘ac_cv_fc_module_output_flag’ variable. The substituted value of ‘FC_MODOUT’ may refer to the ‘ac_empty’ dummy placeholder empty variable, to avoid losing the significant trailing whitespace in a ‘Makefile’.  
File: autoconf.info, Node: Go Compiler, Prev: Fortran Compiler, Up: Compilers and Preprocessors 5.10.9 Go Compiler Characteristics ---------------------------------- Autoconf provides basic support for the Go programming language when using the ‘gccgo’ compiler (there is currently no support for the ‘6g’ and ‘8g’ compilers). -- Macro: AC_PROG_GO ([COMPILER-SEARCH-LIST]) Find the Go compiler to use. Check whether the environment variable ‘GOC’ is set; if so, then set output variable ‘GOC’ to its value. Otherwise, if the macro is invoked without an argument, then search for a Go compiler named ‘gccgo’. If it is not found, then as a last resort set ‘GOC’ to ‘gccgo’. This macro may be invoked with an optional first argument which, if specified, must be a blank-separated list of Go compilers to search for. If output variable ‘GOFLAGS’ was not already set, set it to ‘-g -O2’. If your package does not like this default, ‘GOFLAGS’ may be set before ‘AC_PROG_GO’.  File: autoconf.info, Node: System Services, Next: C and Posix Variants, Prev: Compilers and Preprocessors, Up: Existing Tests 5.11 System Services ==================== The following macros check for operating system services or capabilities. -- Macro: AC_PATH_X Try to locate the X Window System include files and libraries. If the user gave the command line options ‘--x-includes=DIR’ and ‘--x-libraries=DIR’, use those directories. If either or both were not given, get the missing values by running ‘xmkmf’ (or an executable pointed to by the ‘XMKMF’ environment variable) on a trivial ‘Imakefile’ and examining the makefile that it produces. Setting ‘XMKMF’ to ‘false’ disables this method. If this method fails to find the X Window System, ‘configure’ looks for the files in several directories where they often reside. If either method is successful, set the shell variables ‘x_includes’ and ‘x_libraries’ to their locations, unless they are in directories the compiler searches by default. 
If both methods fail, or the user gave the command line option ‘--without-x’, set the shell variable ‘no_x’ to ‘yes’; otherwise set it to the empty string. -- Macro: AC_PATH_XTRA An enhanced version of ‘AC_PATH_X’. It adds the C compiler flags that X needs to output variable ‘X_CFLAGS’, and the X linker flags to ‘X_LIBS’. Define ‘X_DISPLAY_MISSING’ if X is not available. This macro also checks for special libraries that some systems need in order to compile X programs. It adds any that the system needs to output variable ‘X_EXTRA_LIBS’. And it checks for special X11R6 libraries that need to be linked with before ‘-lX11’, and adds any found to the output variable ‘X_PRE_LIBS’. -- Macro: AC_SYS_INTERPRETER Check whether the system supports starting scripts with a line of the form ‘#!/bin/sh’ to select the interpreter to use for the script. After running this macro, shell code in ‘configure.ac’ can check the shell variable ‘interpval’; it is set to ‘yes’ if the system supports ‘#!’, ‘no’ if not. -- Macro: AC_SYS_LARGEFILE Arrange for 64-bit file offsets, known as large-file support (http://www.unix.org/version2/whatsnew/lfs20mar.html). On some hosts, one must use special compiler options to build programs that can access large files. Append any such options to the output variable ‘CC’. Define ‘_FILE_OFFSET_BITS’ and ‘_LARGE_FILES’ if necessary. Large-file support can be disabled by configuring with the ‘--disable-largefile’ option. If you use this macro, check that your program works even when ‘off_t’ is wider than ‘long int’, since this is common when large-file support is enabled. For example, it is not correct to print an arbitrary ‘off_t’ value ‘X’ with ‘printf ("%ld", (long int) X)’. Also, when using this macro in concert with ‘AC_CONFIG_HEADERS’, be sure that ‘config.h’ is included before any system header. The LFS introduced the ‘fseeko’ and ‘ftello’ functions to replace their C counterparts ‘fseek’ and ‘ftell’ that do not use ‘off_t’. 
Take care to use ‘AC_FUNC_FSEEKO’ to make their prototypes available when using them and large-file support is enabled. -- Macro: AC_SYS_LONG_FILE_NAMES If the system supports file names longer than 14 characters, define ‘HAVE_LONG_FILE_NAMES’. -- Macro: AC_SYS_POSIX_TERMIOS Check to see if the Posix termios headers and functions are available on the system. If so, set the shell variable ‘ac_cv_sys_posix_termios’ to ‘yes’. If not, set the variable to ‘no’.  File: autoconf.info, Node: C and Posix Variants, Next: Erlang Libraries, Prev: System Services, Up: Existing Tests 5.12 C and Posix Variants ========================= The following macro makes it possible to use C language and library extensions defined by the C standards committee, features of Posix that are extensions to C, and platform extensions not defined by Posix. -- Macro: AC_USE_SYSTEM_EXTENSIONS If possible, enable extensions to C or Posix on hosts that normally disable the extensions, typically due to standards-conformance namespace issues. This should be called before any macros that run the C compiler. Also, when using this macro in concert with ‘AC_CONFIG_HEADERS’, be sure that ‘config.h’ is included before any system header. The following preprocessor macros are defined unconditionally: ‘_ALL_SOURCE’ Enable extensions on AIX 3 and Interix. ‘_DARWIN_C_SOURCE’ Enable extensions on macOS. ‘_GNU_SOURCE’ Enable extensions on GNU systems. ‘_NETBSD_SOURCE’ Enable general extensions on NetBSD. Enable NetBSD compatibility extensions on Minix. ‘_OPENBSD_SOURCE’ Enable OpenBSD compatibility extensions on NetBSD. Oddly enough, this does nothing on OpenBSD. ‘_POSIX_PTHREAD_SEMANTICS’ Enable Posix-compatible threading on Solaris. ‘__STDC_WANT_IEC_60559_ATTRIBS_EXT__’ Enable extensions specified by ISO/IEC TS 18661-5:2014. ‘__STDC_WANT_IEC_60559_BFP_EXT__’ Enable extensions specified by ISO/IEC TS 18661-1:2014. ‘__STDC_WANT_IEC_60559_DFP_EXT__’ Enable extensions specified by ISO/IEC TS 18661-2:2015. 
‘__STDC_WANT_IEC_60559_FUNCS_EXT__’ Enable extensions specified by ISO/IEC TS 18661-4:2015. ‘__STDC_WANT_IEC_60559_TYPES_EXT__’ Enable extensions specified by ISO/IEC TS 18661-3:2015. ‘__STDC_WANT_LIB_EXT2__’ Enable extensions specified by ISO/IEC TR 24731-2:2010. ‘__STDC_WANT_MATH_SPEC_FUNCS__’ Enable extensions specified by ISO/IEC 24747:2009. ‘_TANDEM_SOURCE’ Enable extensions on HP NonStop systems. The following preprocessor macros are defined only when necessary; they enable access to extensions on some operating systems but _disable_ extensions on other operating systems. ‘__EXTENSIONS__’ Enable general extensions on Solaris. This macro is defined only if the headers included by ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::) work correctly with it defined. ‘_MINIX’ ‘_POSIX_SOURCE’ ‘_POSIX_1_SOURCE’ Defined only on MINIX. ‘_POSIX_SOURCE’ and ‘_POSIX_1_SOURCE’ are needed to enable a number of POSIX features on this OS. ‘_MINIX’ does not affect the system headers’ behavior; future versions of Autoconf may stop defining it. Programs that need to recognize Minix should use ‘AC_CANONICAL_HOST’. ‘_XOPEN_SOURCE’ Defined (with value 500) only if needed to make ‘wchar.h’ declare ‘mbstate_t’. This is known to be necessary on some versions of HP/UX. The C preprocessor macro ‘__STDC_WANT_DEC_FP__’ is not defined. ISO/IEC TR 24732:2009 was superseded by ISO/IEC TS 18661-2:2015. The C preprocessor macro ‘__STDC_WANT_LIB_EXT1__’ is not defined, as C11 Annex K is problematic. See: O’Donell C, Sebor M. Field Experience With Annex K—Bounds Checking Interfaces (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1967.htm). The Autoconf macro ‘AC_USE_SYSTEM_EXTENSIONS’ was introduced in Autoconf 2.60.  File: autoconf.info, Node: Erlang Libraries, Prev: C and Posix Variants, Up: Existing Tests 5.13 Erlang Libraries ===================== The following macros check for an installation of Erlang/OTP, and for the presence of certain Erlang libraries. 
All those macros require the configuration of an Erlang interpreter and an Erlang compiler (*note Erlang Compiler and Interpreter::). -- Macro: AC_ERLANG_SUBST_ERTS_VER Set the output variable ‘ERLANG_ERTS_VER’ to the version of the Erlang runtime system (as returned by Erlang’s ‘erlang:system_info(version)’ function). The result of this test is cached if caching is enabled when running ‘configure’. The ‘ERLANG_ERTS_VER’ variable is not intended to be used for testing for features of specific ERTS versions, but to be used for substituting the ERTS version in Erlang/OTP release resource files (‘.rel’ files), as shown below. -- Macro: AC_ERLANG_SUBST_ROOT_DIR Set the output variable ‘ERLANG_ROOT_DIR’ to the path to the base directory in which Erlang/OTP is installed (as returned by Erlang’s ‘code:root_dir/0’ function). The result of this test is cached if caching is enabled when running ‘configure’. -- Macro: AC_ERLANG_SUBST_LIB_DIR Set the output variable ‘ERLANG_LIB_DIR’ to the path of the library directory of Erlang/OTP (as returned by Erlang’s ‘code:lib_dir/0’ function), which subdirectories each contain an installed Erlang/OTP library. The result of this test is cached if caching is enabled when running ‘configure’. -- Macro: AC_ERLANG_CHECK_LIB (LIBRARY, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) Test whether the Erlang/OTP library LIBRARY is installed by calling Erlang’s ‘code:lib_dir/1’ function. The result of this test is cached if caching is enabled when running ‘configure’. ACTION-IF-FOUND is a list of shell commands to run if the library is installed; ACTION-IF-NOT-FOUND is a list of shell commands to run if it is not. Additionally, if the library is installed, the output variable ‘ERLANG_LIB_DIR_LIBRARY’ is set to the path to the library installation directory, and the output variable ‘ERLANG_LIB_VER_LIBRARY’ is set to the version number that is part of the subdirectory name, if it is in the standard form (‘LIBRARY-VERSION’). 
If the directory name does not have a version part, ‘ERLANG_LIB_VER_LIBRARY’ is set to the empty string. If the library is not installed, ‘ERLANG_LIB_DIR_LIBRARY’ and ‘ERLANG_LIB_VER_LIBRARY’ are set to ‘"not found"’. For example, to check if library ‘stdlib’ is installed: AC_ERLANG_CHECK_LIB([stdlib], [echo "stdlib version \"$ERLANG_LIB_VER_stdlib\"" echo "is installed in \"$ERLANG_LIB_DIR_stdlib\""], [AC_MSG_ERROR([stdlib was not found!])]) The ‘ERLANG_LIB_VER_LIBRARY’ variables (set by ‘AC_ERLANG_CHECK_LIB’) and the ‘ERLANG_ERTS_VER’ variable (set by ‘AC_ERLANG_SUBST_ERTS_VER’) are not intended to be used for testing for features of specific versions of libraries or of the Erlang runtime system. Those variables are intended to be substituted in Erlang release resource files (‘.rel’ files). For instance, to generate a ‘example.rel’ file for an application depending on the ‘stdlib’ library, ‘configure.ac’ could contain: AC_ERLANG_SUBST_ERTS_VER AC_ERLANG_CHECK_LIB([stdlib], [], [AC_MSG_ERROR([stdlib was not found!])]) AC_CONFIG_FILES([example.rel]) The ‘example.rel.in’ file used to generate ‘example.rel’ should contain: {release, {"@PACKAGE@", "@VERSION@"}, {erts, "@ERLANG_ERTS_VER@"}, [{stdlib, "@ERLANG_LIB_VER_stdlib@"}, {@PACKAGE@, "@VERSION@"}]}. In addition to the above macros, which test installed Erlang libraries, the following macros determine the paths to the directories into which newly built Erlang libraries are to be installed: -- Macro: AC_ERLANG_SUBST_INSTALL_LIB_DIR Set the ‘ERLANG_INSTALL_LIB_DIR’ output variable to the directory into which every built Erlang library should be installed in a separate subdirectory. If this variable is not set in the environment when ‘configure’ runs, its default value is ‘${libdir}/erlang/lib’. -- Macro: AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR (LIBRARY, VERSION) Set the ‘ERLANG_INSTALL_LIB_DIR_LIBRARY’ output variable to the directory into which the built Erlang library LIBRARY version VERSION should be installed. 
If this variable is not set in the environment when ‘configure’ runs, its default value is ‘$ERLANG_INSTALL_LIB_DIR/LIBRARY-VERSION’, the value of the ‘ERLANG_INSTALL_LIB_DIR’ variable being set by the ‘AC_ERLANG_SUBST_INSTALL_LIB_DIR’ macro.  File: autoconf.info, Node: Writing Tests, Next: Results, Prev: Existing Tests, Up: Top 6 Writing Tests *************** If the existing feature tests don’t do something you need, you have to write new ones. These macros are the building blocks. They provide ways for other macros to check whether various kinds of features are available and report the results. This chapter contains some suggestions and some of the reasons why the existing tests are written the way they are. You can also learn a lot about how to write Autoconf tests by looking at the existing ones. If something goes wrong in one or more of the Autoconf tests, this information can help you understand the assumptions behind them, which might help you figure out how to best solve the problem. These macros check the output of the compiler system of the current language (*note Language Choice::). They do not cache the results of their tests for future use (*note Caching Results::), because they don’t know enough about the information they are checking for to generate a cache variable name. They also do not print any messages, for the same reason. The checks for particular kinds of features call these macros and do cache their results and print messages about what they’re checking for. When you write a feature test that could be applicable to more than one software package, the best thing to do is encapsulate it in a new macro. *Note Writing Autoconf Macros::, for how to do that. 
* Menu: * Language Choice:: Selecting which language to use for testing * Writing Test Programs:: Forging source files for compilers * Running the Preprocessor:: Detecting preprocessor symbols * Running the Compiler:: Detecting language or header features * Running the Linker:: Detecting library features * Runtime:: Testing for runtime features * Systemology:: A zoology of operating systems * Multiple Cases:: Tests for several possible values  File: autoconf.info, Node: Language Choice, Next: Writing Test Programs, Up: Writing Tests 6.1 Language Choice =================== Autoconf-generated ‘configure’ scripts check for the C compiler and its features by default. Packages that use other programming languages (maybe more than one, e.g., C and C++) need to test features of the compilers for the respective languages. The following macros determine which programming language is used in the subsequent tests in ‘configure.ac’. -- Macro: AC_LANG (LANGUAGE) Do compilation tests using the compiler, preprocessor, and file extensions for the specified LANGUAGE. Supported languages are: ‘C’ Do compilation tests using ‘CC’ and ‘CPP’ and use extension ‘.c’ for test programs. Use compilation flags: ‘CPPFLAGS’ with ‘CPP’, and both ‘CPPFLAGS’ and ‘CFLAGS’ with ‘CC’. ‘C++’ Do compilation tests using ‘CXX’ and ‘CXXCPP’ and use extension ‘.C’ for test programs. Use compilation flags: ‘CPPFLAGS’ with ‘CXXCPP’, and both ‘CPPFLAGS’ and ‘CXXFLAGS’ with ‘CXX’. ‘Fortran 77’ Do compilation tests using ‘F77’ and use extension ‘.f’ for test programs. Use compilation flags: ‘FFLAGS’. ‘Fortran’ Do compilation tests using ‘FC’ and use extension ‘.f’ (or whatever has been set by ‘AC_FC_SRCEXT’) for test programs. Use compilation flags: ‘FCFLAGS’. ‘Erlang’ Compile and execute tests using ‘ERLC’ and ‘ERL’ and use extension ‘.erl’ for test Erlang modules. Use compilation flags: ‘ERLCFLAGS’. ‘Objective C’ Do compilation tests using ‘OBJC’ and ‘OBJCPP’ and use extension ‘.m’ for test programs. 
Use compilation flags: ‘CPPFLAGS’ with ‘OBJCPP’, and both ‘CPPFLAGS’ and ‘OBJCFLAGS’ with ‘OBJC’. ‘Objective C++’ Do compilation tests using ‘OBJCXX’ and ‘OBJCXXCPP’ and use extension ‘.mm’ for test programs. Use compilation flags: ‘CPPFLAGS’ with ‘OBJCXXCPP’, and both ‘CPPFLAGS’ and ‘OBJCXXFLAGS’ with ‘OBJCXX’. ‘Go’ Do compilation tests using ‘GOC’ and use extension ‘.go’ for test programs. Use compilation flags ‘GOFLAGS’. -- Macro: AC_LANG_PUSH (LANGUAGE) Remember the current language (as set by ‘AC_LANG’) on a stack, and then select the LANGUAGE. Use this macro and ‘AC_LANG_POP’ in macros that need to temporarily switch to a particular language. -- Macro: AC_LANG_POP ([LANGUAGE]) Select the language that is saved on the top of the stack, as set by ‘AC_LANG_PUSH’, and remove it from the stack. If given, LANGUAGE specifies the language we just _quit_. It is a good idea to specify it when it’s known (which should be the case...), since Autoconf detects inconsistencies. AC_LANG_PUSH([Fortran 77]) # Perform some tests on Fortran 77. # ... AC_LANG_POP([Fortran 77]) -- Macro: AC_LANG_ASSERT (LANGUAGE) Check statically that the current language is LANGUAGE. You should use this in your language specific macros to avoid that they be called with an inappropriate language. This macro runs only at ‘autoconf’ time, and incurs no cost at ‘configure’ time. Sadly enough and because Autoconf is a two layer language (1), the macros ‘AC_LANG_PUSH’ and ‘AC_LANG_POP’ cannot be “optimizing”, therefore as much as possible you ought to avoid using them to wrap your code, rather, require from the user to run the macro with a correct current language, and check it with ‘AC_LANG_ASSERT’. And anyway, that may help the user understand she is running a Fortran macro while expecting a result about her Fortran 77 compiler... -- Macro: AC_REQUIRE_CPP Ensure that whichever preprocessor would currently be used for tests has been found. 
Calls ‘AC_REQUIRE’ (*note Prerequisite Macros::) with an argument of either ‘AC_PROG_CPP’ or ‘AC_PROG_CXXCPP’, depending on which language is current. ---------- Footnotes ---------- (1) Because M4 is not aware of Sh code, especially conditionals, some optimizations that look nice statically may produce incorrect results at runtime.  File: autoconf.info, Node: Writing Test Programs, Next: Running the Preprocessor, Prev: Language Choice, Up: Writing Tests 6.2 Writing Test Programs ========================= Autoconf tests follow a common scheme: feed some program with some input, and most of the time, feed a compiler with some source file. This section is dedicated to these source samples. * Menu: * Guidelines:: General rules for writing test programs * Test Functions:: Avoiding pitfalls in test programs * Generating Sources:: Source program boilerplate  File: autoconf.info, Node: Guidelines, Next: Test Functions, Up: Writing Test Programs 6.2.1 Guidelines for Test Programs ---------------------------------- The most important rule to follow when writing testing samples is: _Look for realism._ This motto means that testing samples must be written with the same strictness as real programs are written. In particular, you should avoid “shortcuts” and simplifications. Don’t just play with the preprocessor if you want to prepare a compilation. For instance, using ‘cpp’ to check whether a header is functional might let your ‘configure’ accept a header which causes some _compiler_ error. Do not hesitate to check a header with other headers included before, especially required headers. Make sure the symbols you use are properly defined, i.e., refrain from simply declaring a function yourself instead of including the proper header. Test programs should not write to standard output. 
They should exit with status 0 if the test succeeds, and with status 1 otherwise, so that success can be distinguished easily from a core dump or other failure; segmentation violations and other failures produce a nonzero exit status. Unless you arrange for ‘exit’ to be declared, test programs should ‘return’, not ‘exit’, from ‘main’, because on many systems ‘exit’ is not declared by default. Test programs can use ‘#if’ or ‘#ifdef’ to check the values of preprocessor macros defined by tests that have already run. For example, if you call ‘AC_HEADER_STDBOOL’, then later on in ‘configure.ac’ you can have a test program that includes ‘stdbool.h’ conditionally: #ifdef HAVE_STDBOOL_H # include #endif Both ‘#if HAVE_STDBOOL_H’ and ‘#ifdef HAVE_STDBOOL_H’ will work with any standard C compiler. Some developers prefer ‘#if’ because it is easier to read, while others prefer ‘#ifdef’ because it avoids diagnostics with picky compilers like GCC with the ‘-Wundef’ option. If a test program needs to use or create a data file, give it a name that starts with ‘conftest’, such as ‘conftest.data’. The ‘configure’ script cleans up by running ‘rm -f -r conftest*’ after running test programs and if the script is interrupted.  File: autoconf.info, Node: Test Functions, Next: Generating Sources, Prev: Guidelines, Up: Writing Test Programs 6.2.2 Test Functions -------------------- These days it’s safe to assume support for function prototypes (introduced in C89). Functions that test programs declare should also be conditionalized for C++, which requires ‘extern "C"’ prototypes. Make sure to not include any header files containing clashing prototypes. #ifdef __cplusplus extern "C" #endif void *valloc (size_t); If a test program calls a function with invalid parameters (just to see whether it exists), organize the program to ensure that it never invokes that function. You can do this by calling it in another function that is never invoked. 
You can’t do it by putting it after a call to ‘exit’, because GCC version 2 knows that ‘exit’ never returns and optimizes out any code that follows it in the same block. If you include any header files, be sure to call the functions relevant to them with the correct number of arguments, even if they are just 0, to avoid compilation errors due to prototypes. GCC version 2 has internal prototypes for several functions that it automatically inlines; for example, ‘memcpy’. To avoid errors when checking for them, either pass them the correct number of arguments or redeclare them with a different return type (such as ‘char’).  File: autoconf.info, Node: Generating Sources, Prev: Test Functions, Up: Writing Test Programs 6.2.3 Generating Sources ------------------------ Autoconf provides a set of macros that can be used to generate test source files. They are written to be language generic, i.e., they actually depend on the current language (*note Language Choice::) to “format” the output properly. -- Macro: AC_LANG_CONFTEST (SOURCE) Save the SOURCE text in the current test source file: ‘conftest.EXTENSION’ where the EXTENSION depends on the current language. As of Autoconf 2.63b, the source file also contains the results of all of the ‘AC_DEFINE’ performed so far. Note that the SOURCE is evaluated exactly once, like regular Autoconf macro arguments, and therefore (i) you may pass a macro invocation, (ii) if not, be sure to double quote if needed. This macro issues a warning during ‘autoconf’ processing if SOURCE does not include an expansion of the macro ‘AC_LANG_DEFINES_PROVIDED’ (note that both ‘AC_LANG_SOURCE’ and ‘AC_LANG_PROGRAM’ call this macro, and thus avoid the warning). This macro is seldom called directly, but is used under the hood by more common macros such as ‘AC_COMPILE_IFELSE’ and ‘AC_RUN_IFELSE’. 
-- Macro: AC_LANG_DEFINES_PROVIDED This macro is called as a witness that the file ‘conftest.EXTENSION’ appropriate for the current language is complete, including all previously determined results from ‘AC_DEFINE’. This macro is seldom called directly, but exists if you have a compelling reason to write a conftest file without using ‘AC_LANG_SOURCE’, yet still want to avoid a syntax warning from ‘AC_LANG_CONFTEST’. -- Macro: AC_LANG_SOURCE (SOURCE) Expands into the SOURCE, with the definition of all the ‘AC_DEFINE’ performed so far. This macro includes an expansion of ‘AC_LANG_DEFINES_PROVIDED’. In many cases, you may find it more convenient to use the wrapper ‘AC_LANG_PROGRAM’. For instance, executing (observe the double quotation!): AC_INIT([Hello], [1.0], [bug-hello@example.org], [], [https://www.example.org/]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_LANG([C]) AC_LANG_CONFTEST( [AC_LANG_SOURCE([[const char hw[] = "Hello, World\n";]])]) gcc -E -dD conftest.c on a system with ‘gcc’ installed, results in: ... # 1 "conftest.c" #define PACKAGE_NAME "Hello" #define PACKAGE_TARNAME "hello" #define PACKAGE_VERSION "1.0" #define PACKAGE_STRING "Hello 1.0" #define PACKAGE_BUGREPORT "bug-hello@example.org" #define PACKAGE_URL "https://www.example.org/" #define HELLO_WORLD "Hello, World\n" const char hw[] = "Hello, World\n"; When the test language is Fortran, Erlang, or Go, the ‘AC_DEFINE’ definitions are not automatically translated into constants in the source code by this macro. -- Macro: AC_LANG_PROGRAM (PROLOGUE, BODY) Expands into a source file which consists of the PROLOGUE, and then BODY as body of the main function (e.g., ‘main’ in C). Since it uses ‘AC_LANG_SOURCE’, the features of the latter are available. 
For instance: AC_INIT([Hello], [1.0], [bug-hello@example.org], [], [https://www.example.org/]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_LANG_CONFTEST( [AC_LANG_PROGRAM([[const char hw[] = "Hello, World\n";]], [[fputs (hw, stdout);]])]) gcc -E -dD conftest.c on a system with ‘gcc’ installed, results in: ... # 1 "conftest.c" #define PACKAGE_NAME "Hello" #define PACKAGE_TARNAME "hello" #define PACKAGE_VERSION "1.0" #define PACKAGE_STRING "Hello 1.0" #define PACKAGE_BUGREPORT "bug-hello@example.org" #define PACKAGE_URL "https://www.example.org/" #define HELLO_WORLD "Hello, World\n" const char hw[] = "Hello, World\n"; int main (void) { fputs (hw, stdout); ; return 0; } In Erlang tests, the created source file is that of an Erlang module called ‘conftest’ (‘conftest.erl’). This module defines and exports at least one ‘start/0’ function, which is called to perform the test. The PROLOGUE is optional code that is inserted between the module header and the ‘start/0’ function definition. BODY is the body of the ‘start/0’ function without the final period (*note Runtime::, about constraints on this function’s behavior). For instance: AC_INIT([Hello], [1.0], [bug-hello@example.org]) AC_LANG(Erlang) AC_LANG_CONFTEST( [AC_LANG_PROGRAM([[-define(HELLO_WORLD, "Hello, world!").]], [[io:format("~s~n", [?HELLO_WORLD])]])]) cat conftest.erl results in: -module(conftest). -export([start/0]). -define(HELLO_WORLD, "Hello, world!"). start() -> io:format("~s~n", [?HELLO_WORLD]) . -- Macro: AC_LANG_CALL (PROLOGUE, FUNCTION) Expands into a source file which consists of the PROLOGUE, and then a call to the FUNCTION as body of the main function (e.g., ‘main’ in C). Since it uses ‘AC_LANG_PROGRAM’, the feature of the latter are available. This function will probably be replaced in the future by a version which would enable specifying the arguments. The use of this macro is not encouraged, as it violates strongly the typing system. 
This macro cannot be used for Erlang tests. -- Macro: AC_LANG_FUNC_LINK_TRY (FUNCTION) Expands into a source file which uses the FUNCTION in the body of the main function (e.g., ‘main’ in C). Since it uses ‘AC_LANG_PROGRAM’, the features of the latter are available. As ‘AC_LANG_CALL’, this macro is documented only for completeness. It is considered to be severely broken, and in the future will be removed in favor of actual function calls (with properly typed arguments). This macro cannot be used for Erlang tests.  File: autoconf.info, Node: Running the Preprocessor, Next: Running the Compiler, Prev: Writing Test Programs, Up: Writing Tests 6.3 Running the Preprocessor ============================ Sometimes one might need to run the preprocessor on some source file. _Usually it is a bad idea_, as you typically need to _compile_ your project, not merely run the preprocessor on it; therefore you certainly want to run the compiler, not the preprocessor. Resist the temptation of following the easiest path. Nevertheless, if you need to run the preprocessor, then use ‘AC_PREPROC_IFELSE’. The macros described in this section cannot be used for tests in Erlang, Fortran, or Go, since those languages require no preprocessor. -- Macro: AC_PREPROC_IFELSE (INPUT, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Run the preprocessor of the current language (*note Language Choice::) on the INPUT, run the shell commands ACTION-IF-TRUE on success, ACTION-IF-FALSE otherwise. The INPUT can be made by ‘AC_LANG_PROGRAM’ and friends. This macro uses ‘CPPFLAGS’, but not ‘CFLAGS’, because ‘-g’, ‘-O’, etc. are not valid options to many C preprocessors. It is customary to report unexpected failures with ‘AC_MSG_FAILURE’. If needed, ACTION-IF-TRUE can further access the preprocessed output in the file ‘conftest.i’. 
For instance: AC_INIT([Hello], [1.0], [bug-hello@example.org]) AC_DEFINE([HELLO_WORLD], ["Hello, World\n"], [Greetings string.]) AC_PREPROC_IFELSE( [AC_LANG_PROGRAM([[const char hw[] = "Hello, World\n";]], [[fputs (hw, stdout);]])], [AC_MSG_RESULT([OK])], [AC_MSG_FAILURE([unexpected preprocessor failure])]) might result in: checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking how to run the C preprocessor... gcc -std=gnu11 -E OK The macro ‘AC_TRY_CPP’ (*note Obsolete Macros::) used to play the role of ‘AC_PREPROC_IFELSE’, but double quotes its argument, making it impossible to use it to elaborate sources. You are encouraged to get rid of your old use of the macro ‘AC_TRY_CPP’ in favor of ‘AC_PREPROC_IFELSE’, but, in the first place, are you sure you need to run the _preprocessor_ and not the compiler? -- Macro: AC_EGREP_HEADER (PATTERN, HEADER-FILE, ACTION-IF-FOUND, [ACTION-IF-NOT-FOUND]) If the output of running the preprocessor on the system header file HEADER-FILE matches the extended regular expression PATTERN, execute shell commands ACTION-IF-FOUND, otherwise execute ACTION-IF-NOT-FOUND. See below for some problems involving this macro. -- Macro: AC_EGREP_CPP (PATTERN, PROGRAM, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) PROGRAM is the text of a C or C++ program, on which shell variable, back quote, and backslash substitutions are performed. If the output of running the preprocessor on PROGRAM matches the extended regular expression PATTERN, execute shell commands ACTION-IF-FOUND, otherwise execute ACTION-IF-NOT-FOUND. See below for some problems involving this macro. 
‘AC_EGREP_CPP’ and ‘AC_EGREP_HEADER’ should be used with care, as preprocessors can insert line breaks between output tokens. For example, the preprocessor might transform this: #define MAJOR 2 #define MINOR 23 Version MAJOR . MINOR into this: Version 2 . 23 Because preprocessors are allowed to insert white space, change escapes in string constants, insert backslash-newline pairs, or do any of a number of things that do not change the meaning of the preprocessed program, it is better to rely on ‘AC_PREPROC_IFELSE’ than to resort to ‘AC_EGREP_CPP’ or ‘AC_EGREP_HEADER’.  File: autoconf.info, Node: Running the Compiler, Next: Running the Linker, Prev: Running the Preprocessor, Up: Writing Tests 6.4 Running the Compiler ======================== To check for a syntax feature of the current language’s (*note Language Choice::) compiler, such as whether it recognizes a certain keyword, or simply to try some library feature, use ‘AC_COMPILE_IFELSE’ to try to compile a small program that uses that feature. -- Macro: AC_COMPILE_IFELSE (INPUT, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Run the compiler and compilation flags of the current language (*note Language Choice::) on the INPUT, run the shell commands ACTION-IF-TRUE on success, ACTION-IF-FALSE otherwise. The INPUT can be made by ‘AC_LANG_PROGRAM’ and friends. It is customary to report unexpected failures with ‘AC_MSG_FAILURE’. This macro does not try to link; use ‘AC_LINK_IFELSE’ if you need to do that (*note Running the Linker::). If needed, ACTION-IF-TRUE can further access the just-compiled object file ‘conftest.$OBJEXT’. This macro uses ‘AC_REQUIRE’ for the compiler associated with the current language, which means that if the compiler has not yet been determined, the compiler determination will be made prior to the body of the outermost ‘AC_DEFUN’ macro that triggered this macro to expand (*note Expanded Before Required::). For tests in Erlang, the INPUT must be the source code of a module named ‘conftest’. 
‘AC_COMPILE_IFELSE’ generates a ‘conftest.beam’ file that can be interpreted by the Erlang virtual machine (‘ERL’). It is recommended to use ‘AC_LANG_PROGRAM’ to specify the test program, to ensure that the Erlang module has the right name.  File: autoconf.info, Node: Running the Linker, Next: Runtime, Prev: Running the Compiler, Up: Writing Tests 6.5 Running the Linker ====================== To check for a library, a function, or a global variable, Autoconf ‘configure’ scripts try to compile and link a small program that uses it. This is unlike Metaconfig, which by default uses ‘nm’ or ‘ar’ on the C library to try to figure out which functions are available. Trying to link with the function is usually a more reliable approach because it avoids dealing with the variations in the options and output formats of ‘nm’ and ‘ar’ and in the location of the standard libraries. It also allows configuring for cross-compilation or checking a function’s runtime behavior if needed. On the other hand, it can be slower than scanning the libraries once, but accuracy is more important than speed. ‘AC_LINK_IFELSE’ is used to compile test programs to test for functions and global variables. It is also used by ‘AC_CHECK_LIB’ to check for libraries (*note Libraries::), by adding the library being checked for to ‘LIBS’ temporarily and trying to link a small program. -- Macro: AC_LINK_IFELSE (INPUT, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Run the compiler (and compilation flags) and the linker of the current language (*note Language Choice::) on the INPUT, run the shell commands ACTION-IF-TRUE on success, ACTION-IF-FALSE otherwise. The INPUT can be made by ‘AC_LANG_PROGRAM’ and friends. If needed, ACTION-IF-TRUE can further access the just-linked program file ‘conftest$EXEEXT’. ‘LDFLAGS’ and ‘LIBS’ are used for linking, in addition to the current compilation flags. It is customary to report unexpected failures with ‘AC_MSG_FAILURE’. 
This macro does not try to execute the program; use ‘AC_RUN_IFELSE’ if you need to do that (*note Runtime::). The ‘AC_LINK_IFELSE’ macro cannot be used for Erlang tests, since Erlang programs are interpreted and do not require linking.  File: autoconf.info, Node: Runtime, Next: Systemology, Prev: Running the Linker, Up: Writing Tests 6.6 Checking Runtime Behavior ============================= Sometimes you need to find out how a system performs at runtime, such as whether a given function has a certain capability or bug. If you can, make such checks when your program runs instead of when it is configured. You can check for things like the machine’s endianness when your program initializes itself. If you really need to test for a runtime behavior while configuring, you can write a test program to determine the result, and compile and run it using ‘AC_RUN_IFELSE’. Avoid running test programs if possible, because this prevents people from configuring your package for cross-compiling. -- Macro: AC_RUN_IFELSE (INPUT, [ACTION-IF-TRUE], [ACTION-IF-FALSE], [ACTION-IF-CROSS-COMPILING = ‘AC_MSG_FAILURE’]) Run the compiler (and compilation flags) and the linker of the current language (*note Language Choice::) on the INPUT, then execute the resulting program. If the program returns an exit status of 0 when executed, run shell commands ACTION-IF-TRUE. Otherwise, run shell commands ACTION-IF-FALSE. The INPUT can be made by ‘AC_LANG_PROGRAM’ and friends. ‘LDFLAGS’ and ‘LIBS’ are used for linking, in addition to the compilation flags of the current language (*note Language Choice::). Additionally, ACTION-IF-TRUE can run ‘./conftest$EXEEXT’ for further testing. In the ACTION-IF-FALSE section, the failing exit status is available in the shell variable ‘$?’. This exit status might be that of a failed compilation, or it might be that of a failed program execution. 
If cross-compilation mode is enabled (this is the case if either the compiler being used does not produce executables that run on the system where ‘configure’ is being run, or if the options ‘--build’ and ‘--host’ were both specified and their values are different), then the test program is not run. If the optional shell commands ACTION-IF-CROSS-COMPILING are given, those commands are run instead; typically these commands provide pessimistic defaults that allow cross-compilation to work even if the guess was wrong. If the fourth argument is empty or omitted, but cross-compilation is detected, then ‘configure’ prints an error message and exits. If you want your package to be useful in a cross-compilation scenario, you _should_ provide a non-empty ACTION-IF-CROSS-COMPILING clause, as well as wrap the ‘AC_RUN_IFELSE’ compilation inside an ‘AC_CACHE_CHECK’ (*note Caching Results::) which allows the user to override the pessimistic default if needed. It is customary to report unexpected failures with ‘AC_MSG_FAILURE’. ‘autoconf’ prints a warning message when creating ‘configure’ each time it encounters a call to ‘AC_RUN_IFELSE’ with no ACTION-IF-CROSS-COMPILING argument given. If you are not concerned about users configuring your package for cross-compilation, you may ignore the warning. A few of the macros distributed with Autoconf produce this warning message; but if this is a problem for you, please report it as a bug, along with an appropriate pessimistic guess to use instead. To configure for cross-compiling you can also choose a value for those parameters based on the canonical system name (*note Manual Configuration::). Alternatively, set up a test results cache file with the correct values for the host system (*note Caching Results::). 
To provide a default for calls of ‘AC_RUN_IFELSE’ that are embedded in other macros, including a few of the ones that come with Autoconf, you can test whether the shell variable ‘cross_compiling’ is set to ‘yes’, and then use an alternate method to get the results instead of calling the macros. It is also permissible to temporarily assign to ‘cross_compiling’ in order to force tests to behave as though they are in a cross-compilation environment, particularly since this provides a way to test your ACTION-IF-CROSS-COMPILING even when you are not using a cross-compiler. # We temporarily set cross-compile mode to force AC_COMPUTE_INT # to use the slow link-only method save_cross_compiling=$cross_compiling cross_compiling=yes AC_COMPUTE_INT([...]) cross_compiling=$save_cross_compiling A C or C++ runtime test should be portable. *Note Portable C and C++::. Erlang tests must themselves exit the Erlang VM by calling the ‘halt/1’ function: the given status code is used to determine the success of the test (status is ‘0’) or its failure (status is different from ‘0’), as explained above. It must be noted that data output through the standard output (e.g., using ‘io:format/2’) may be truncated when halting the VM. Therefore, if a test must output configuration information, it is recommended to create and to output data into the temporary file named ‘conftest.out’, using the functions of module ‘file’. The ‘conftest.out’ file is automatically deleted by the ‘AC_RUN_IFELSE’ macro. 
For instance, a simplified implementation of Autoconf’s ‘AC_ERLANG_SUBST_LIB_DIR’ macro is: AC_INIT([LibdirTest], [1.0], [bug-libdirtest@example.org]) AC_ERLANG_NEED_ERL AC_LANG(Erlang) AC_RUN_IFELSE( [AC_LANG_PROGRAM([], [dnl file:write_file("conftest.out", code:lib_dir()), halt(0)])], [echo "code:lib_dir() returned: `cat conftest.out`"], [AC_MSG_FAILURE([test Erlang program execution failed])])  File: autoconf.info, Node: Systemology, Next: Multiple Cases, Prev: Runtime, Up: Writing Tests 6.7 Systemology =============== This section aims at presenting some systems and pointers to documentation. It may help you addressing particular problems reported by users. Posix-conforming systems (https://en.wikipedia.org/wiki/POSIX) are derived from the Unix operating system (https://en.wikipedia.org/wiki/Unix). The Rosetta Stone for Unix (http://bhami.com/rosetta.html) contains a table correlating the features of various Posix-conforming systems. Unix History (https://www.levenez.com/unix/) is a simplified diagram of how many Unix systems were derived from each other. The Heirloom Project (http://heirloom.sourceforge.net/) provides some variants of traditional implementations of Unix utilities. Darwin Darwin is also known as Mac OS X. Beware that the file system _can_ be case-preserving, but case insensitive. This can cause nasty problems, since for instance the installation attempt for a package having an ‘INSTALL’ file can result in ‘make install’ report that nothing was to be done! That’s all dependent on whether the file system is a UFS (case sensitive) or HFS+ (case preserving). By default Apple wants you to install the OS on HFS+. Unfortunately, there are some pieces of software which really need to be built on UFS. We may want to rebuild Darwin to have both UFS and HFS+ available (and put the /local/build tree on the UFS). 
QNX 4.25 QNX is a realtime operating system running on Intel architecture meant to be scalable from the small embedded systems to the hundred processor super-computer. It claims to be Posix certified. More information is available on the QNX home page (https://blackberry.qnx.com/en). Unix version 7 Officially this was called the “Seventh Edition” of “the UNIX time-sharing system” but we use the more-common name “Unix version 7”. Documentation is available in the Unix Seventh Edition Manual (https://s3.amazonaws.com/plan9-bell-labs/7thEdMan/index.html). Previous versions of Unix are called “Unix version 6”, etc., but they were not as widely used.  File: autoconf.info, Node: Multiple Cases, Prev: Systemology, Up: Writing Tests 6.8 Multiple Cases ================== Some operations are accomplished in several possible ways, depending on the OS variant. Checking for them essentially requires a “case statement”. Autoconf does not directly provide one; however, it is easy to simulate by using a shell variable to keep track of whether a way to perform the operation has been found yet. Here is an example that uses the shell variable ‘fstype’ to keep track of whether the remaining cases need to be checked. Note that since the value of ‘fstype’ is under our control, we don’t have to use the longer ‘test "x$fstype" = xno’. AC_MSG_CHECKING([how to get file system type]) fstype=no # The order of these tests is important. 
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/statvfs.h> #include <sys/fstyp.h>]])], [AC_DEFINE([FSTYPE_STATVFS], [1], [Define if statvfs exists.]) fstype=SVR4]) if test $fstype = no; then AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/statfs.h> #include <sys/fstyp.h>]])], [AC_DEFINE([FSTYPE_USG_STATFS], [1], [Define if USG statfs.]) fstype=SVR3]) fi if test $fstype = no; then AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <sys/statfs.h> #include <sys/vmount.h>]])], [AC_DEFINE([FSTYPE_AIX_STATFS], [1], [Define if AIX statfs.]) fstype=AIX]) fi # (more cases omitted here) AC_MSG_RESULT([$fstype])  File: autoconf.info, Node: Results, Next: Programming in M4, Prev: Writing Tests, Up: Top 7 Results of Tests ****************** Once ‘configure’ has determined whether a feature exists, what can it do to record that information? There are four sorts of things it can do: define a C preprocessor symbol, set a variable in the output files, save the result in a cache file for future ‘configure’ runs, and print a message letting the user know the result of the test. * Menu: * Defining Symbols:: Defining C preprocessor symbols * Setting Output Variables:: Replacing variables in output files * Special Chars in Variables:: Characters to beware of in variables * Caching Results:: Speeding up subsequent ‘configure’ runs * Printing Messages:: Notifying ‘configure’ users  File: autoconf.info, Node: Defining Symbols, Next: Setting Output Variables, Up: Results 7.1 Defining C Preprocessor Symbols =================================== A common action to take in response to a feature test is to define a C preprocessor symbol indicating the results of the test. That is done by calling ‘AC_DEFINE’ or ‘AC_DEFINE_UNQUOTED’. By default, ‘AC_OUTPUT’ places the symbols defined by these macros into the output variable ‘DEFS’, which contains an option ‘-DSYMBOL=VALUE’ for each symbol defined. Unlike in Autoconf version 1, there is no variable ‘DEFS’ defined while ‘configure’ is running. 
To check whether Autoconf macros have already defined a certain C preprocessor symbol, test the value of the appropriate cache variable, as in this example: AC_CHECK_FUNC([vprintf], [AC_DEFINE([HAVE_VPRINTF], [1], [Define if vprintf exists.])]) if test "x$ac_cv_func_vprintf" != xyes; then AC_CHECK_FUNC([_doprnt], [AC_DEFINE([HAVE_DOPRNT], [1], [Define if _doprnt exists.])]) fi If ‘AC_CONFIG_HEADERS’ has been called, then instead of creating ‘DEFS’, ‘AC_OUTPUT’ creates a header file by substituting the correct values into ‘#define’ statements in a template file. *Note Configuration Headers::, for more information about this kind of output. -- Macro: AC_DEFINE (VARIABLE, VALUE, [DESCRIPTION]) -- Macro: AC_DEFINE (VARIABLE) Define VARIABLE to VALUE (verbatim), by defining a C preprocessor macro for VARIABLE. VARIABLE should be a C identifier, optionally suffixed by a parenthesized argument list to define a C preprocessor macro with arguments. The macro argument list, if present, should be a comma-separated list of C identifiers, possibly terminated by an ellipsis ‘...’ if C99-or-later syntax is employed. VARIABLE should not contain comments, white space, trigraphs, backslash-newlines, universal character names, or non-ASCII characters. VALUE may contain backslash-escaped newlines, which will be preserved if you use ‘AC_CONFIG_HEADERS’ but flattened if passed via ‘@DEFS@’ (with no effect on the compilation, since the preprocessor sees only one line in the first place). VALUE should not contain raw newlines. If you are not using ‘AC_CONFIG_HEADERS’, VALUE should not contain any ‘#’ characters, as ‘make’ tends to eat them. To use a shell variable, use ‘AC_DEFINE_UNQUOTED’ instead. DESCRIPTION is only useful if you are using ‘AC_CONFIG_HEADERS’. In this case, DESCRIPTION is put into the generated ‘config.h.in’ as the comment before the macro define. 
The following example defines the C preprocessor variable ‘EQUATION’ to be the string constant ‘"$a > $b"’: AC_DEFINE([EQUATION], ["$a > $b"], [Equation string.]) If neither VALUE nor DESCRIPTION are given, then VALUE defaults to 1 instead of to the empty string. This is for backwards compatibility with older versions of Autoconf, but this usage is obsolescent and may be withdrawn in future versions of Autoconf. If the VARIABLE is a literal string, it is passed to ‘m4_pattern_allow’ (*note Forbidden Patterns::). If multiple ‘AC_DEFINE’ statements are executed for the same VARIABLE name (not counting any parenthesized argument list), the last one wins. -- Macro: AC_DEFINE_UNQUOTED (VARIABLE, VALUE, [DESCRIPTION]) -- Macro: AC_DEFINE_UNQUOTED (VARIABLE) Like ‘AC_DEFINE’, but three shell expansions are performed—once—on VARIABLE and VALUE: variable expansion (‘$’), command substitution (‘`’), and backslash escaping (‘\’), as if in an unquoted here-document. Single and double quote characters in the value have no special meaning. Use this macro instead of ‘AC_DEFINE’ when VARIABLE or VALUE is a shell variable. Examples: AC_DEFINE_UNQUOTED([config_machfile], ["$machfile"], [Configuration machine file.]) AC_DEFINE_UNQUOTED([GETGROUPS_T], [$ac_cv_type_getgroups], [getgroups return type.]) AC_DEFINE_UNQUOTED([$ac_tr_hdr], [1], [Translated header name.]) Due to a syntactical oddity of the Bourne shell, do not use semicolons to separate ‘AC_DEFINE’ or ‘AC_DEFINE_UNQUOTED’ calls from other macro calls or shell code; that can cause syntax errors in the resulting ‘configure’ script. Use either blanks or newlines. 
That is, do this: AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]) LIBS="-lelf $LIBS"]) or this: AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]) LIBS="-lelf $LIBS"]) instead of this: AC_CHECK_HEADER([elf.h], [AC_DEFINE([SVR4], [1], [System V Release 4]); LIBS="-lelf $LIBS"])  File: autoconf.info, Node: Setting Output Variables, Next: Special Chars in Variables, Prev: Defining Symbols, Up: Results 7.2 Setting Output Variables ============================ Another way to record the results of tests is to set “output variables”, which are shell variables whose values are substituted into files that ‘configure’ outputs. The two macros below create new output variables. *Note Preset Output Variables::, for a list of output variables that are always available. -- Macro: AC_SUBST (VARIABLE, [VALUE]) Create an output variable from a shell variable. Make ‘AC_OUTPUT’ substitute the variable VARIABLE into output files (typically one or more makefiles). This means that ‘AC_OUTPUT’ replaces instances of ‘@VARIABLE@’ in input files with the value that the shell variable VARIABLE has when ‘AC_OUTPUT’ is called. The value can contain any non-‘NUL’ character, including newline. If you are using Automake 1.11 or newer, for newlines in values you might want to consider using ‘AM_SUBST_NOTMAKE’ to prevent ‘automake’ from adding a line ‘VARIABLE = @VARIABLE@’ to the ‘Makefile.in’ files (*note Automake: (automake)Optional.). Variable occurrences should not overlap: e.g., an input file should not contain ‘@VAR1@VAR2@’ if VAR1 and VAR2 are variable names. The substituted value is not rescanned for more output variables; occurrences of ‘@VARIABLE@’ in the value are inserted literally into the output file. (The algorithm uses the special marker ‘|#_!!_#|’ internally, so neither the substituted value nor the output file may contain ‘|#_!!_#|’.) If VALUE is given, in addition assign it to VARIABLE. 
The string VARIABLE is passed to ‘m4_pattern_allow’ (*note Forbidden Patterns::). VARIABLE is not further expanded, even if there is another macro by the same name. -- Macro: AC_SUBST_FILE (VARIABLE) Another way to create an output variable from a shell variable. Make ‘AC_OUTPUT’ insert (without substitutions) the contents of the file named by shell variable VARIABLE into output files. This means that ‘AC_OUTPUT’ replaces instances of ‘@VARIABLE@’ in output files (such as ‘Makefile.in’) with the contents of the file that the shell variable VARIABLE names when ‘AC_OUTPUT’ is called. Set the variable to ‘/dev/null’ for cases that do not have a file to insert. This substitution occurs only when the ‘@VARIABLE@’ is on a line by itself, optionally surrounded by spaces and tabs. The substitution replaces the whole line, including the spaces, tabs, and the terminating newline. This macro is useful for inserting makefile fragments containing special dependencies or other ‘make’ directives for particular host or target types into makefiles. For example, ‘configure.ac’ could contain: AC_SUBST_FILE([host_frag]) host_frag=$srcdir/conf/sun4.mh and then a ‘Makefile.in’ could contain: @host_frag@ The string VARIABLE is passed to ‘m4_pattern_allow’ (*note Forbidden Patterns::). Running ‘configure’ in varying environments can be extremely dangerous. If for instance the user runs ‘CC=bizarre-cc ./configure’, then the cache, ‘config.h’, and many other output files depend upon ‘bizarre-cc’ being the C compiler. If for some reason the user runs ‘./configure’ again, or if it is run via ‘./config.status --recheck’, (*Note Automatic Remaking::, and *note config.status Invocation::), then the configuration can be inconsistent, composed of results depending upon two different compilers. Environment variables that affect this situation, such as ‘CC’ above, are called “precious variables”, and can be declared as such by ‘AC_ARG_VAR’. 
-- Macro: AC_ARG_VAR (VARIABLE, DESCRIPTION) Declare VARIABLE is a precious variable, and include its DESCRIPTION in the variable section of ‘./configure --help’. Being precious means that − VARIABLE is substituted via ‘AC_SUBST’. − The value of VARIABLE when ‘configure’ was launched is saved in the cache, including if it was not specified on the command line but via the environment. Indeed, while ‘configure’ can notice the definition of ‘CC’ in ‘./configure CC=bizarre-cc’, it is impossible to notice it in ‘CC=bizarre-cc ./configure’, which, unfortunately, is what most users do. We emphasize that it is the _initial_ value of VARIABLE which is saved, not that found during the execution of ‘configure’. Indeed, specifying ‘./configure FOO=foo’ and letting ‘./configure’ guess that ‘FOO’ is ‘foo’ can be two different things. − VARIABLE is checked for consistency between two ‘configure’ runs. For instance: $ ./configure --silent --config-cache $ CC=cc ./configure --silent --config-cache configure: error: 'CC' was not set in the previous run configure: error: changes in the environment can compromise \ the build configure: error: run 'make distclean' and/or \ 'rm config.cache' and start over and similarly if the variable is unset, or if its content is changed. If the content has white space changes only, then the error is degraded to a warning only, but the old value is reused. 
− VARIABLE is kept during automatic reconfiguration (*note config.status Invocation::) as if it had been passed as a command line argument, including when no cache is used: $ CC=/usr/bin/cc ./configure var=raboof --silent $ ./config.status --recheck running CONFIG_SHELL=/bin/sh /bin/sh ./configure var=raboof \ CC=/usr/bin/cc --no-create --no-recursion  File: autoconf.info, Node: Special Chars in Variables, Next: Caching Results, Prev: Setting Output Variables, Up: Results 7.3 Special Characters in Output Variables ========================================== Many output variables are intended to be evaluated both by ‘make’ and by the shell. Some characters are expanded differently in these two contexts, so to avoid confusion these variables’ values should not contain any of the following characters: " # $ & ' ( ) * ; < > ? [ \ ^ ` | Also, these variables’ values should neither contain newlines, nor start with ‘~’, nor contain white space or ‘:’ immediately followed by ‘~’. The values can contain nonempty sequences of white space characters like tabs and spaces, but each such sequence might arbitrarily be replaced by a single space during substitution. These restrictions apply both to the values that ‘configure’ computes, and to the values set directly by the user. For example, the following invocations of ‘configure’ are problematic, since they attempt to use special characters within ‘CPPFLAGS’ and white space within ‘$(srcdir)’: CPPFLAGS='-DOUCH="&\"#$*?"' '../My Source/ouch-1.0/configure' '../My Source/ouch-1.0/configure' CPPFLAGS='-DOUCH="&\"#$*?"'  File: autoconf.info, Node: Caching Results, Next: Printing Messages, Prev: Special Chars in Variables, Up: Results 7.4 Caching Results =================== To avoid checking for the same features repeatedly in various ‘configure’ scripts (or in repeated runs of one script), ‘configure’ can optionally save the results of many checks in a “cache file” (*note Cache Files::). 
If a ‘configure’ script runs with caching enabled and finds a cache file, it reads the results of previous runs from the cache and avoids rerunning those checks. As a result, ‘configure’ can then run much faster than if it had to perform all of the checks every time. -- Macro: AC_CACHE_VAL (CACHE-ID, COMMANDS-TO-SET-IT) Ensure that the results of the check identified by CACHE-ID are available. If the results of the check were in the cache file that was read, and ‘configure’ was not given the ‘--quiet’ or ‘--silent’ option, print a message saying that the result was cached; otherwise, run the shell commands COMMANDS-TO-SET-IT. If the shell commands are run to determine the value, the value is saved in the cache file just before ‘configure’ creates its output files. *Note Cache Variable Names::, for how to choose the name of the CACHE-ID variable. The COMMANDS-TO-SET-IT _must have no side effects_ except for setting the variable CACHE-ID, see below. -- Macro: AC_CACHE_CHECK (MESSAGE, CACHE-ID, COMMANDS-TO-SET-IT) A wrapper for ‘AC_CACHE_VAL’ that takes care of printing the messages. This macro provides a convenient shorthand for the most common way to use these macros. It calls ‘AC_MSG_CHECKING’ for MESSAGE, then ‘AC_CACHE_VAL’ with the CACHE-ID and COMMANDS arguments, and ‘AC_MSG_RESULT’ with CACHE-ID. The COMMANDS-TO-SET-IT _must have no side effects_ except for setting the variable CACHE-ID, see below. It is common to find buggy macros using ‘AC_CACHE_VAL’ or ‘AC_CACHE_CHECK’, because people are tempted to call ‘AC_DEFINE’ in the COMMANDS-TO-SET-IT. Instead, the code that _follows_ the call to ‘AC_CACHE_VAL’ should call ‘AC_DEFINE’, by examining the value of the cache variable. 
For instance, the following macro is broken: AC_DEFUN([AC_SHELL_TRUE], [AC_CACHE_CHECK([whether true(1) works], [my_cv_shell_true_works], [my_cv_shell_true_works=no (true) 2>/dev/null && my_cv_shell_true_works=yes if test "x$my_cv_shell_true_works" = xyes; then AC_DEFINE([TRUE_WORKS], [1], [Define if 'true(1)' works properly.]) fi]) ]) This fails if the cache is enabled: the second time this macro is run, ‘TRUE_WORKS’ _will not be defined_. The proper implementation is: AC_DEFUN([AC_SHELL_TRUE], [AC_CACHE_CHECK([whether true(1) works], [my_cv_shell_true_works], [my_cv_shell_true_works=no (true) 2>/dev/null && my_cv_shell_true_works=yes]) if test "x$my_cv_shell_true_works" = xyes; then AC_DEFINE([TRUE_WORKS], [1], [Define if 'true(1)' works properly.]) fi ]) Also, COMMANDS-TO-SET-IT should not print any messages, for example with ‘AC_MSG_CHECKING’; do that before calling ‘AC_CACHE_VAL’, so the messages are printed regardless of whether the results of the check are retrieved from the cache or determined by running the shell commands. * Menu: * Cache Variable Names:: Shell variables used in caches * Cache Files:: Files ‘configure’ uses for caching * Cache Checkpointing:: Loading and saving the cache file  File: autoconf.info, Node: Cache Variable Names, Next: Cache Files, Up: Caching Results 7.4.1 Cache Variable Names -------------------------- The names of cache variables should have the following format: PACKAGE-PREFIX_cv_VALUE-TYPE_SPECIFIC-VALUE_[ADDITIONAL-OPTIONS] for example, ‘ac_cv_header_stat_broken’ or ‘ac_cv_prog_gcc_traditional’. The parts of the variable name are: PACKAGE-PREFIX An abbreviation for your package or organization; the same prefix you begin local Autoconf macros with, except lowercase by convention. For cache values used by the distributed Autoconf macros, this value is ‘ac’. ‘_cv_’ Indicates that this shell variable is a cache value. This string _must_ be present in the variable name, including the leading underscore. 
VALUE-TYPE A convention for classifying cache values, to produce a rational naming system. The values used in Autoconf are listed in *note Macro Names::. SPECIFIC-VALUE Which member of the class of cache values this test applies to. For example, which function (‘alloca’), program (‘gcc’), or output variable (‘INSTALL’). ADDITIONAL-OPTIONS Any particular behavior of the specific member that this test applies to. For example, ‘broken’ or ‘set’. This part of the name may be omitted if it does not apply. The values assigned to cache variables may not contain newlines. Usually, their values are Boolean (‘yes’ or ‘no’) or the names of files or functions; so this is not an important restriction. *note Cache Variable Index:: for an index of cache variables with documented semantics.  File: autoconf.info, Node: Cache Files, Next: Cache Checkpointing, Prev: Cache Variable Names, Up: Caching Results 7.4.2 Cache Files ----------------- A cache file is a shell script that caches the results of configure tests run on one system so they can be shared between configure scripts and configure runs. It is not useful on other systems. If its contents are invalid for some reason, the user may delete or edit it, or override documented cache variables on the ‘configure’ command line. By default, ‘configure’ uses no cache file, to avoid problems caused by accidental use of stale cache files. To enable caching, ‘configure’ accepts ‘--config-cache’ (or ‘-C’) to cache results in the file ‘config.cache’. Alternatively, ‘--cache-file=FILE’ specifies that FILE be the cache file. The cache file is created if it does not exist already. When ‘configure’ calls ‘configure’ scripts in subdirectories, it uses the ‘--cache-file’ argument so that they share the same cache. *Note Subdirectories::, for information on configuring subdirectories with the ‘AC_CONFIG_SUBDIRS’ macro. ‘config.status’ only pays attention to the cache file if it is given the ‘--recheck’ option, which makes it rerun ‘configure’. 
It is wrong to try to distribute cache files for particular system types. There is too much room for error in doing that, and too much administrative overhead in maintaining them. For any features that can’t be guessed automatically, use the standard method of the canonical system type and linking files (*note Manual Configuration::). The site initialization script can specify a site-wide cache file to use, instead of the usual per-program cache. In this case, the cache file gradually accumulates information whenever someone runs a new ‘configure’ script. (Running ‘configure’ merges the new cache results with the existing cache file.) This may cause problems, however, if the system configuration (e.g., the installed libraries or compilers) changes and the stale cache file is not deleted. If ‘configure’ is interrupted at the right time when it updates a cache file outside of the build directory where the ‘configure’ script is run, it may leave behind a temporary file named after the cache file with digits following it. You may safely delete such a file.  File: autoconf.info, Node: Cache Checkpointing, Prev: Cache Files, Up: Caching Results 7.4.3 Cache Checkpointing ------------------------- If your configure script, or a macro called from ‘configure.ac’, happens to abort the configure process, it may be useful to checkpoint the cache a few times at key points using ‘AC_CACHE_SAVE’. Doing so reduces the amount of time it takes to rerun the configure script with (hopefully) the error that caused the previous abort corrected. -- Macro: AC_CACHE_LOAD Loads values from existing cache file, or creates a new cache file if a cache file is not found. Called automatically from ‘AC_INIT’. -- Macro: AC_CACHE_SAVE Flushes all cached values to the cache file. Called automatically from ‘AC_OUTPUT’, but it can be quite useful to call ‘AC_CACHE_SAVE’ at key points in ‘configure.ac’. For instance: ... AC_INIT, etc. ... # Checks for programs. AC_PROG_CC AC_PROG_AWK ... 
more program checks ... AC_CACHE_SAVE # Checks for libraries. AC_CHECK_LIB([nsl], [gethostbyname]) AC_CHECK_LIB([socket], [connect]) ... more lib checks ... AC_CACHE_SAVE # Might abort... AM_PATH_GTK([1.0.2], [], [AC_MSG_ERROR([GTK not in path])]) AM_PATH_GTKMM([0.9.5], [], [AC_MSG_ERROR([GTK not in path])]) ... AC_OUTPUT, etc. ...  File: autoconf.info, Node: Printing Messages, Prev: Caching Results, Up: Results 7.5 Printing Messages ===================== ‘configure’ scripts need to give users running them several kinds of information. The following macros print messages in ways appropriate for each kind. The arguments to all of them get enclosed in shell double quotes, so the shell performs variable and back-quote substitution on them. These macros are all wrappers around the ‘echo’ shell command. They direct output to the appropriate file descriptor (*note File Descriptor Macros::). ‘configure’ scripts should rarely need to run ‘echo’ directly to print messages for the user. Using these macros makes it easy to change how and when each kind of message is printed; such changes need only be made to the macro definitions and all the callers change automatically. To diagnose static issues, i.e., when ‘autoconf’ is run, see *note Diagnostic Macros::. -- Macro: AC_MSG_CHECKING (FEATURE-DESCRIPTION) Notify the user that ‘configure’ is checking for a particular feature. This macro prints a message that starts with ‘checking ’ and ends with ‘...’ and no newline. It must be followed by a call to ‘AC_MSG_RESULT’ to print the result of the check and the newline. The FEATURE-DESCRIPTION should be something like ‘whether the Fortran compiler accepts C++ comments’ or ‘for _Alignof’. This macro prints nothing if ‘configure’ is run with the ‘--quiet’ or ‘--silent’ option. -- Macro: AC_MSG_RESULT (RESULT-DESCRIPTION) Notify the user of the results of a check. RESULT-DESCRIPTION is almost always the value of the cache variable for the check, typically ‘yes’, ‘no’, or a file name. 
This macro should follow a call to ‘AC_MSG_CHECKING’, and the RESULT-DESCRIPTION should be the completion of the message printed by the call to ‘AC_MSG_CHECKING’. This macro prints nothing if ‘configure’ is run with the ‘--quiet’ or ‘--silent’ option. -- Macro: AC_MSG_NOTICE (MESSAGE) Deliver the MESSAGE to the user. It is useful mainly to print a general description of the overall purpose of a group of feature checks, e.g., AC_MSG_NOTICE([checking if stack overflow is detectable]) This macro prints nothing if ‘configure’ is run with the ‘--quiet’ or ‘--silent’ option. -- Macro: AC_MSG_ERROR (ERROR-DESCRIPTION, [EXIT-STATUS = ‘$?/1’]) Notify the user of an error that prevents ‘configure’ from completing. This macro prints an error message to the standard error output and exits ‘configure’ with EXIT-STATUS (‘$?’ by default, except that ‘0’ is converted to ‘1’). ERROR-DESCRIPTION should be something like ‘invalid value $HOME for \$HOME’. The ERROR-DESCRIPTION should start with a lower-case letter, and “cannot” is preferred to “can’t”. -- Macro: AC_MSG_FAILURE (ERROR-DESCRIPTION, [EXIT-STATUS]) This ‘AC_MSG_ERROR’ wrapper notifies the user of an error that prevents ‘configure’ from completing _and_ that additional details are provided in ‘config.log’. This is typically used when abnormal results are found during a compilation. -- Macro: AC_MSG_WARN (PROBLEM-DESCRIPTION) Notify the ‘configure’ user of a possible problem. This macro prints the message to the standard error output; ‘configure’ continues running afterward, so macros that call ‘AC_MSG_WARN’ should provide a default (back-up) behavior for the situations they warn about. PROBLEM-DESCRIPTION should be something like ‘ln -s seems to make hard links’.  
File: autoconf.info, Node: Programming in M4, Next: Programming in M4sh, Prev: Results, Up: Top 8 Programming in M4 ******************* Autoconf is written on top of two layers: “M4sugar”, which provides convenient macros for pure M4 programming, and “M4sh”, which provides macros dedicated to shell script generation. As of this version of Autoconf, these two layers still contain experimental macros, whose interface might change in the future. As a matter of fact, _anything that is not documented must not be used_. * Menu: * M4 Quotation:: Protecting macros from unwanted expansion * Using autom4te:: The Autoconf executables backbone * Programming in M4sugar:: Convenient pure M4 macros * Debugging via autom4te:: Figuring out what M4 was doing  File: autoconf.info, Node: M4 Quotation, Next: Using autom4te, Up: Programming in M4 8.1 M4 Quotation ================ The most common problem with existing macros is an improper quotation. This section, which users of Autoconf can skip, but which macro writers _must_ read, first justifies the quotation scheme that was chosen for Autoconf and then ends with a rule of thumb. Understanding the former helps one to follow the latter. * Menu: * Active Characters:: Characters that change the behavior of M4 * One Macro Call:: Quotation and one macro call * Quoting and Parameters:: M4 vs. 
shell parameters * Quotation and Nested Macros:: Macros calling macros * Changequote is Evil:: Worse than INTERCAL: M4 + changequote * Quadrigraphs:: Another way to escape special characters * Balancing Parentheses:: Dealing with unbalanced parentheses * Quotation Rule Of Thumb:: One parenthesis, one quote  File: autoconf.info, Node: Active Characters, Next: One Macro Call, Up: M4 Quotation 8.1.1 Active Characters ----------------------- To fully understand where proper quotation is important, you first need to know what the special characters are in Autoconf: ‘#’ introduces a comment inside which no macro expansion is performed, ‘,’ separates arguments, ‘[’ and ‘]’ are the quotes themselves(1), ‘(’ and ‘)’ (which M4 tries to match by pairs), and finally ‘$’ inside a macro definition. In order to understand the delicate case of macro calls, we first have to present some obvious failures. Below they are “obvious-ified”, but when you find them in real life, they are usually in disguise. Comments, introduced by a hash and running up to the newline, are opaque tokens to the top level: active characters are turned off, and there is no macro expansion: # define([def], ine) ⇒# define([def], ine) Each time there can be a macro expansion, there is a quotation expansion, i.e., one level of quotes is stripped: int tab[10]; ⇒int tab10; [int tab[10];] ⇒int tab[10]; Without this in mind, the reader might try hopelessly to use her macro ‘array’: define([array], [int tab[10];]) array ⇒int tab10; [array] ⇒array How can you correctly output the intended results(2)? ---------- Footnotes ---------- (1) By itself, M4 uses ‘`’ and ‘'’; it is the M4sugar layer that sets up the preferred quotes of ‘[’ and ‘]’. (2) Using ‘defn’.  
File: autoconf.info, Node: One Macro Call, Next: Quoting and Parameters, Prev: Active Characters, Up: M4 Quotation 8.1.2 One Macro Call -------------------- Let’s proceed on the interaction between active characters and macros with this small macro, which just returns its first argument: define([car], [$1]) The two pairs of quotes above are not part of the arguments of ‘define’; rather, they are understood by the top level when it tries to find the arguments of ‘define’. Therefore, assuming ‘car’ is not already defined, it is equivalent to write: define(car, $1) But, while it is acceptable for a ‘configure.ac’ to avoid unnecessary quotes, it is bad practice for Autoconf macros which must both be more robust and also advocate perfect style. At the top level, there are only two possibilities: either you quote or you don’t: car(foo, bar, baz) ⇒foo [car(foo, bar, baz)] ⇒car(foo, bar, baz) Let’s pay attention to the special characters: car(#) error→EOF in argument list The closing parenthesis is hidden in the comment; with a hypothetical quoting, the top level understood it this way: car([#)] Proper quotation, of course, fixes the problem: car([#]) ⇒# Here are more examples: car(foo, bar) ⇒foo car([foo, bar]) ⇒foo, bar car((foo, bar)) ⇒(foo, bar) car([(foo], [bar)]) ⇒(foo define([a], [b]) ⇒ car(a) ⇒b car([a]) ⇒b car([[a]]) ⇒a car([[[a]]]) ⇒[a]  File: autoconf.info, Node: Quoting and Parameters, Next: Quotation and Nested Macros, Prev: One Macro Call, Up: M4 Quotation 8.1.3 Quoting and Parameters ---------------------------- When M4 encounters ‘$’ within a macro definition, followed immediately by a character it recognizes (‘0’...‘9’, ‘#’, ‘@’, or ‘*’), it will perform M4 parameter expansion. This happens regardless of how many layers of quotes the parameter expansion is nested within, or even if it occurs in text that will be rescanned as a comment. 
define([none], [$1]) ⇒ define([one], [[$1]]) ⇒ define([two], [[[$1]]]) ⇒ define([comment], [# $1]) ⇒ define([active], [ACTIVE]) ⇒ none([active]) ⇒ACTIVE one([active]) ⇒active two([active]) ⇒[active] comment([active]) ⇒# active On the other hand, since autoconf generates shell code, you often want to output shell variable expansion, rather than performing M4 parameter expansion. To do this, you must use M4 quoting to separate the ‘$’ from the next character in the definition of your macro. If the macro definition occurs in single-quoted text, then insert another level of quoting; if the usage is already inside a double-quoted string, then split it into concatenated strings. define([foo], [a single-quoted $[]1 definition]) ⇒ define([bar], [[a double-quoted $][1 definition]]) ⇒ foo ⇒a single-quoted $1 definition bar ⇒a double-quoted $1 definition Posix states that M4 implementations are free to provide implementation extensions when ‘${’ is encountered in a macro definition. Autoconf reserves the longer sequence ‘${{’ for use with planned extensions that will be available in the future GNU M4 2.0, but guarantees that all other instances of ‘${’ will be output literally. Therefore, this idiom can also be used to output shell code parameter references: define([first], [${1}])first ⇒${1} Posix also states that ‘$11’ should expand to the first parameter concatenated with a literal ‘1’, although some versions of GNU M4 expand the eleventh parameter instead. For portability, you should only use single-digit M4 parameter expansion. With this in mind, we can explore the cases where macros invoke macros...  
File: autoconf.info, Node: Quotation and Nested Macros, Next: Changequote is Evil, Prev: Quoting and Parameters, Up: M4 Quotation 8.1.4 Quotation and Nested Macros --------------------------------- The examples below use the following macros: define([car], [$1]) define([active], [ACT, IVE]) define([array], [int tab[10]]) Each additional embedded macro call introduces other possible interesting quotations: car(active) ⇒ACT car([active]) ⇒ACT, IVE car([[active]]) ⇒active In the first case, the top level looks for the arguments of ‘car’, and finds ‘active’. Because M4 evaluates its arguments before applying the macro, ‘active’ is expanded, which results in: car(ACT, IVE) ⇒ACT In the second case, the top level gives ‘active’ as first and only argument of ‘car’, which results in: active ⇒ACT, IVE i.e., the argument is evaluated _after_ the macro that invokes it. In the third case, ‘car’ receives ‘[active]’, which results in: [active] ⇒active exactly as we already saw above. The example above, applied to a more realistic example, gives: car(int tab[10];) ⇒int tab10; car([int tab[10];]) ⇒int tab10; car([[int tab[10];]]) ⇒int tab[10]; Huh? The first case is easily understood, but why is the second wrong, and the third right? To understand that, you must know that after M4 expands a macro, the resulting text is immediately subjected to macro expansion and quote removal. This means that the quote removal occurs twice—first before the argument is passed to the ‘car’ macro, and second after the ‘car’ macro expands to the first argument. As the author of the Autoconf macro ‘car’, you then consider it to be incorrect that your users have to double-quote the arguments of ‘car’, so you “fix” your macro. Let’s call it ‘qar’ for quoted car: define([qar], [[$1]]) and check that ‘qar’ is properly fixed: qar([int tab[10];]) ⇒int tab[10]; Ahhh! That’s much better. 
But note what you’ve done: now that the result of ‘qar’ is always a literal string, the only time a user can use nested macros is if she relies on an _unquoted_ macro call: qar(active) ⇒ACT qar([active]) ⇒active leaving no way for her to reproduce what she used to do with ‘car’: car([active]) ⇒ACT, IVE Worse yet: she wants to use a macro that produces a set of ‘cpp’ macros: define([my_includes], [#include ]) car([my_includes]) ⇒#include qar(my_includes) error→EOF in argument list This macro, ‘qar’, because it double quotes its arguments, forces its users to leave their macro calls unquoted, which is dangerous. Commas and other active symbols are interpreted by M4 before they are given to the macro, often not in the way the users expect. Also, because ‘qar’ behaves differently from the other macros, it’s an exception that should be avoided in Autoconf.  File: autoconf.info, Node: Changequote is Evil, Next: Quadrigraphs, Prev: Quotation and Nested Macros, Up: M4 Quotation 8.1.5 ‘changequote’ is Evil --------------------------- The temptation is often high to bypass proper quotation, in particular when it’s late at night. Then, many experienced Autoconf hackers finally surrender to the dark side of the force and use the ultimate weapon: ‘changequote’. The M4 builtin ‘changequote’ belongs to a set of primitives that allow one to adjust the syntax of the language to adjust it to one’s needs. For instance, by default M4 uses ‘`’ and ‘'’ as quotes, but in the context of shell programming (and actually of most programming languages), that’s about the worst choice one can make: because of strings and back-quoted expressions in shell code (such as ‘'this'’ and ‘`that`’), and because of literal characters in usual programming languages (as in ‘'0'’), there are many unbalanced ‘`’ and ‘'’. Proper M4 quotation then becomes a nightmare, if not impossible. 
In order to make M4 useful in such a context, its designers have equipped it with ‘changequote’, which makes it possible to choose another pair of quotes. M4sugar, M4sh, Autoconf, and Autotest all have chosen to use ‘[’ and ‘]’. Not especially because they are unlikely characters, but _because they are characters unlikely to be unbalanced_. There are other magic primitives, such as ‘changecom’ to specify what syntactic forms are comments (it is common to see ‘changecom()’ when M4 is used to produce HTML pages), ‘changeword’ and ‘changesyntax’ to change other syntactic details (such as the character to denote the Nth argument, ‘$’ by default, the parentheses around arguments, etc.). These primitives are really meant to make M4 more useful for specific domains: they should be considered like command line options: ‘--quotes’, ‘--comments’, ‘--words’, and ‘--syntax’. Nevertheless, they are implemented as M4 builtins, as it makes M4 libraries self contained (no need for additional options). There lies the problem... The problem is that it is then tempting to use them in the middle of an M4 script, as opposed to its initialization. This, if not carefully thought out, can lead to disastrous effects: _you are changing the language in the middle of the execution_. Changing and restoring the syntax is often not enough: if you happened to invoke macros in between, these macros are lost, as the current syntax is probably not the one they were implemented with.  File: autoconf.info, Node: Quadrigraphs, Next: Balancing Parentheses, Prev: Changequote is Evil, Up: M4 Quotation 8.1.6 Quadrigraphs ------------------ When writing an Autoconf macro you may occasionally need to generate special characters that are difficult to express with the standard Autoconf quoting rules. For example, you may need to output the regular expression ‘[^[]’, which matches any character other than ‘[’. This expression contains unbalanced brackets so it cannot be put easily into an M4 macro. 
Additionally, there are a few m4sugar macros (such as ‘m4_split’ and ‘m4_expand’) which internally use special markers in addition to the regular quoting characters. If the arguments to these macros contain the literal strings ‘-=<{(’ or ‘)}>=-’, the macros might behave incorrectly. You can work around these problems by using one of the following “quadrigraphs”: ‘@<:@’ ‘[’ ‘@:>@’ ‘]’ ‘@S|@’ ‘$’ ‘@%:@’ ‘#’ ‘@{:@’ ‘(’ ‘@:}@’ ‘)’ ‘@&t@’ Expands to nothing. Quadrigraphs are replaced at a late stage of the translation process, after ‘m4’ is run, so they do not get in the way of M4 quoting. For example, the string ‘^@<:@’, independently of its quotation, appears as ‘^[’ in the output. The empty quadrigraph can be used: − to mark trailing spaces explicitly Trailing spaces are smashed by ‘autom4te’. This is a feature. − to produce quadrigraphs and other strings reserved by m4sugar For instance ‘@<@&t@:@’ produces ‘@<:@’. For a more contrived example: m4_define([a], [A])m4_define([b], [B])m4_define([c], [C])dnl m4_split([a )}>=- b -=<{( c]) ⇒[a], [], [B], [], [c] m4_split([a )}@&t@>=- b -=<@&t@{( c]) ⇒[a], [)}>=-], [b], [-=<{(], [c] − to escape _occurrences_ of forbidden patterns For instance you might want to mention ‘AC_FOO’ in a comment, while still being sure that ‘autom4te’ still catches unexpanded ‘AC_*’. Then write ‘AC@&t@_FOO’. The name ‘@&t@’ was suggested by Paul Eggert: I should give some credit to the ‘@&t@’ pun. The ‘&’ is my own invention, but the ‘t’ came from the source code of the ALGOL68C compiler, written by Steve Bourne (of Bourne shell fame), and which used ‘mt’ to denote the empty string. In C, it would have looked like something like: char const mt[] = ""; but of course the source code was written in Algol 68. I don’t know where he got ‘mt’ from: it could have been his own invention, and I suppose it could have been a common pun around the Cambridge University computer lab at the time.  
File: autoconf.info, Node: Balancing Parentheses, Next: Quotation Rule Of Thumb, Prev: Quadrigraphs, Up: M4 Quotation 8.1.7 Dealing with unbalanced parentheses ----------------------------------------- One of the pitfalls of portable shell programming is that if you intend your script to run with obsolescent shells, ‘case’ statements require unbalanced parentheses. *Note Limitations of Shell Builtins: case. With syntax highlighting editors, the presence of unbalanced ‘)’ can interfere with editors that perform syntax highlighting of macro contents based on finding the matching ‘(’. Another concern is how much editing must be done when transferring code snippets between shell scripts and macro definitions. But most importantly, the presence of unbalanced parentheses can introduce expansion bugs. For an example, here is an underquoted attempt to use the macro ‘my_case’, which happens to expand to a portable ‘case’ statement: AC_DEFUN([my_case], [case $file_name in *.c) echo "C source code";; esac]) AS_IF(:, my_case) In the above example, the ‘AS_IF’ call under-quotes its arguments. As a result, the unbalanced ‘)’ generated by the premature expansion of ‘my_case’ results in expanding ‘AS_IF’ with a truncated parameter, and the expansion is syntactically invalid: if :; then case $file_name in *.c fi echo "C source code";; esac) If nothing else, this should emphasize the importance of the quoting arguments to macro calls. On the other hand, there are several variations for defining ‘my_case’ to be more robust, even when used without proper quoting, each with some benefits and some drawbacks. Use left parenthesis before pattern AC_DEFUN([my_case], [case $file_name in (*.c) echo "C source code";; esac]) This is simple and provides balanced parentheses. Although this is not portable to obsolescent shells (notably Solaris 10 ‘/bin/sh’), platforms with these shells invariably have a more-modern shell available somewhere so this approach typically suffices nowadays. 
Creative literal shell comment AC_DEFUN([my_case], [case $file_name in #( *.c) echo "C source code";; esac]) This version provides balanced parentheses to several editors, and can be copied and pasted into a terminal as is. Unfortunately, it is still unbalanced as an Autoconf argument, since ‘#(’ is an M4 comment that masks the normal properties of ‘(’. Quadrigraph shell comment AC_DEFUN([my_case], [case $file_name in @%:@( *.c) echo "C source code";; esac]) This version provides balanced parentheses to even more editors, and can be used as a balanced Autoconf argument. Unfortunately, it requires some editing before it can be copied and pasted into a terminal, and the use of the quadrigraph ‘@%:@’ for ‘#’ reduces readability. Quoting just the parenthesis AC_DEFUN([my_case], [case $file_name in *.c[)] echo "C source code";; esac]) This version quotes the ‘)’, so that it can be used as a balanced Autoconf argument. As written, this is not balanced to an editor, but it can be coupled with ‘[#(]’ to meet that need, too. However, it still requires some edits before it can be copied and pasted into a terminal. Double-quoting the entire statement AC_DEFUN([my_case], [[case $file_name in #( *.c) echo "C source code";; esac]]) Since the entire macro is double-quoted, there is no problem with using this as an Autoconf argument; and since the double-quoting is over the entire statement, this code can be easily copied and pasted into a terminal. However, the double quoting prevents the expansion of any macros inside the case statement, which may cause its own set of problems. Using ‘AS_CASE’ AC_DEFUN([my_case], [AS_CASE([$file_name], [*.c], [echo "C source code"])]) This version avoids the balancing issue altogether, by relying on ‘AS_CASE’ (*note Common Shell Constructs::); it also allows for the expansion of ‘AC_REQUIRE’ to occur prior to the entire case statement, rather than within a branch of the case statement that might not be taken. 
However, the abstraction comes with a penalty that it is no longer a quick copy, paste, and edit to get back to shell code.  File: autoconf.info, Node: Quotation Rule Of Thumb, Prev: Balancing Parentheses, Up: M4 Quotation 8.1.8 Quotation Rule Of Thumb ----------------------------- To conclude, the quotation rule of thumb is: _One pair of quotes per pair of parentheses._ Never over-quote, never under-quote, in particular in the definition of macros. In the few places where the macros need to use brackets (usually in C program text or regular expressions), properly quote _the arguments_! It is common to read Autoconf programs with snippets like: AC_TRY_LINK( changequote(<<, >>)dnl <<#include #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif>>, changequote([, ])dnl [atoi (*tzname);], ac_cv_var_tzname=yes, ac_cv_var_tzname=no) which is incredibly useless since ‘AC_TRY_LINK’ is _already_ double quoting, so you just need: AC_TRY_LINK( [#include #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif], [atoi (*tzname);], [ac_cv_var_tzname=yes], [ac_cv_var_tzname=no]) The M4-fluent reader might note that these two examples are rigorously equivalent, since M4 swallows both the ‘changequote(<<, >>)’ and ‘<<’ ‘>>’ when it “collects” the arguments: these quotes are not part of the arguments! Simplified, the example above is just doing this: changequote(<<, >>)dnl <<[]>> changequote([, ])dnl instead of simply: [[]] With macros that do not double quote their arguments (which is the rule), double-quote the (risky) literals: AC_LINK_IFELSE([AC_LANG_PROGRAM( [[#include #ifndef tzname /* For SGI. */ extern char *tzname[]; /* RS6000 and others reject char **tzname. */ #endif]], [atoi (*tzname);])], [ac_cv_var_tzname=yes], [ac_cv_var_tzname=no]) Please note that the macro ‘AC_TRY_LINK’ is obsolete, so you really should be using ‘AC_LINK_IFELSE’ instead. 
*Note Quadrigraphs::, for what to do if you run into a hopeless case where quoting does not suffice. When you create a ‘configure’ script using newly written macros, examine it carefully to check whether you need to add more quotes in your macros. If one or more words have disappeared in the M4 output, you need more quotes. When in doubt, quote. However, it’s also possible to put on too many layers of quotes. If this happens, the resulting ‘configure’ script may contain unexpanded macros. The ‘autoconf’ program checks for this problem by looking for the string ‘AC_’ in ‘configure’. However, this heuristic does not work in general: for example, it does not catch overquoting in ‘AC_DEFINE’ descriptions.  File: autoconf.info, Node: Using autom4te, Next: Programming in M4sugar, Prev: M4 Quotation, Up: Programming in M4 8.2 Using ‘autom4te’ ==================== The Autoconf suite, including M4sugar, M4sh, and Autotest, in addition to Autoconf per se, heavily rely on M4. All these different uses revealed common needs factored into a layer over M4: ‘autom4te’(1). ‘autom4te’ is a preprocessor that is like ‘m4’. It supports M4 extensions designed for use in tools like Autoconf. * Menu: * autom4te Invocation:: A GNU M4 wrapper * Customizing autom4te:: Customizing the Autoconf package ---------- Footnotes ---------- (1) Yet another great name from Lars J. Aas.  File: autoconf.info, Node: autom4te Invocation, Next: Customizing autom4te, Up: Using autom4te 8.2.1 Invoking ‘autom4te’ ------------------------- The command line arguments are modeled after M4’s: autom4te OPTIONS FILES where the FILES are directly passed to ‘m4’. By default, GNU M4 is found during configuration, but the environment variable ‘M4’ can be set to tell ‘autom4te’ where to look. In addition to the regular expansion, it handles the replacement of the quadrigraphs (*note Quadrigraphs::), and of ‘__oline__’, the current line in the output. 
It supports an extended syntax for the FILES: ‘FILE.m4f’ This file is an M4 frozen file. Note that _all the previous files are ignored_. See the ‘--melt’ option for the rationale. ‘FILE?’ If found in the library path, the FILE is included for expansion, otherwise it is ignored instead of triggering a failure. Of course, it supports the Autoconf common subset of options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Report processing steps. ‘--debug’ ‘-d’ Don’t remove the temporary files and be even more verbose. ‘--include=DIR’ ‘-I DIR’ Also look for input files in DIR. Multiple invocations accumulate. ‘--output=FILE’ ‘-o FILE’ Save output (script or trace) to FILE. The file ‘-’ stands for the standard output. As an extension of ‘m4’, it includes the following options: ‘--warnings=CATEGORY[,CATEGORY...]’ ‘-WCATEGORY[,CATEGORY...]’ Enable or disable warnings related to each CATEGORY. *Note m4_warn::, for a comprehensive list of categories. Special values include: ‘all’ Enable all categories of warnings. ‘none’ Disable all categories of warnings. ‘error’ Treat all warnings as errors. ‘no-CATEGORY’ Disable warnings falling into CATEGORY. The enviroment variable ‘WARNINGS’ may also be set to a comma-separated list of warning categories to enable or disable. It is interpreted exactly the same way as the argument of ‘--warnings’, but unknown categories are silently ignored. The command line takes precedence; for instance, if ‘WARNINGS’ is set to ‘obsolete’, but ‘-Wnone’ is given on the command line, no warnings will be issued. Some categories of warnings are on by default. Again, for details see *note m4_warn::. ‘--melt’ ‘-M’ Do not use frozen files. Any argument ‘FILE.m4f’ is replaced by ‘FILE.m4’. This helps tracing the macros which are executed only when the files are frozen, typically ‘m4_define’. 
For instance, running: autom4te --melt 1.m4 2.m4f 3.m4 4.m4f input.m4 is roughly equivalent to running: m4 1.m4 2.m4 3.m4 4.m4 input.m4 while autom4te 1.m4 2.m4f 3.m4 4.m4f input.m4 is equivalent to: m4 --reload-state=4.m4f input.m4 ‘--freeze’ ‘-F’ Produce a frozen state file. ‘autom4te’ freezing is stricter than M4’s: it must produce no warnings, and no output other than empty lines (a line with white space is _not_ empty) and comments (starting with ‘#’). Unlike ‘m4’’s similarly-named option, this option takes no argument: autom4te 1.m4 2.m4 3.m4 --freeze --output=3.m4f corresponds to m4 1.m4 2.m4 3.m4 --freeze-state=3.m4f ‘--mode=OCTAL-MODE’ ‘-m OCTAL-MODE’ Set the mode of the non-traces output to OCTAL-MODE; by default ‘0666’. As another additional feature over ‘m4’, ‘autom4te’ caches its results. GNU M4 is able to produce a regular output and traces at the same time. Traces are heavily used in the GNU Build System: ‘autoheader’ uses them to build ‘config.h.in’, ‘autoreconf’ to determine what GNU Build System components are used, ‘automake’ to “parse” ‘configure.ac’ etc. To avoid recomputation, traces are cached while performing regular expansion, and conversely. This cache is (actually, the caches are) stored in the directory ‘autom4te.cache’. _It can safely be removed_ at any moment (especially if for some reason ‘autom4te’ considers it trashed). ‘--cache=DIRECTORY’ ‘-C DIRECTORY’ Specify the name of the directory where the result should be cached. Passing an empty value disables caching. Be sure to pass a relative file name, as for the time being, global caches are not supported. ‘--no-cache’ Don’t cache the results. ‘--force’ ‘-f’ If a cache is used, consider it obsolete (but update it anyway). Because traces are so important to the GNU Build System, ‘autom4te’ provides high level tracing features as compared to M4, and helps exploiting the cache: ‘--trace=MACRO[:FORMAT]’ ‘-t MACRO[:FORMAT]’ Trace the invocations of MACRO according to the FORMAT. 
Multiple ‘--trace’ arguments can be used to list several macros. Multiple ‘--trace’ arguments for a single macro are not cumulative; instead, you should just make FORMAT as long as needed. The FORMAT is a regular string, with newlines if desired, and several special escape codes. It defaults to ‘$f:$l:$n:$%’. It can use the following special escapes: ‘$$’ The character ‘$’. ‘$f’ The file name from which MACRO is called. ‘$l’ The line number from which MACRO is called. ‘$d’ The depth of the MACRO call. This is an M4 technical detail that you probably don’t want to know about. ‘$n’ The name of the MACRO. ‘$NUM’ The NUMth argument of the call to MACRO. ‘$@’ ‘$SEP@’ ‘${SEPARATOR}@’ All the arguments passed to MACRO, separated by the character SEP or the string SEPARATOR (‘,’ by default). Each argument is quoted, i.e., enclosed in a pair of square brackets. ‘$*’ ‘$SEP*’ ‘${SEPARATOR}*’ As above, but the arguments are not quoted. ‘$%’ ‘$SEP%’ ‘${SEPARATOR}%’ As above, but the arguments are not quoted, all new line characters in the arguments are smashed, and the default separator is ‘:’. The escape ‘$%’ produces single-line trace outputs (unless you put newlines in the ‘separator’), while ‘$@’ and ‘$*’ do not. *Note autoconf Invocation::, for examples of trace uses. ‘--preselect=MACRO’ ‘-p MACRO’ Cache the traces of MACRO, but do not enable traces. This is especially important to save CPU cycles in the future. For instance, when invoked, ‘autoconf’ pre-selects all the macros that ‘autoheader’, ‘automake’, ‘autoreconf’, etc., trace, so that running ‘m4’ is not needed to trace them: the cache suffices. This results in a huge speed-up. Finally, ‘autom4te’ introduces the concept of “Autom4te libraries”. They consists in a powerful yet extremely simple feature: sets of combined command line arguments: ‘--language=LANGUAGE’ ‘-l LANGUAGE’ Use the LANGUAGE Autom4te library. Current languages include: ‘M4sugar’ create M4sugar output. ‘M4sh’ create M4sh executable shell scripts. 
‘Autotest’ create Autotest executable test suites. ‘Autoconf-without-aclocal-m4’ create Autoconf executable configure scripts without reading ‘aclocal.m4’. ‘Autoconf’ create Autoconf executable configure scripts. This language inherits all the characteristics of ‘Autoconf-without-aclocal-m4’ and additionally reads ‘aclocal.m4’. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend directory DIR to the search path. This is used to include the language-specific files before any third-party macros. As an example, if Autoconf is installed in its default location, ‘/usr/local’, the command ‘autom4te -l m4sugar foo.m4’ is strictly equivalent to the command: autom4te --prepend-include /usr/local/share/autoconf \ m4sugar/m4sugar.m4f foo.m4 Recursive expansion applies here: the command ‘autom4te -l m4sh foo.m4’ is the same as ‘autom4te --language M4sugar m4sugar/m4sh.m4f foo.m4’, i.e.: autom4te --prepend-include /usr/local/share/autoconf \ m4sugar/m4sugar.m4f m4sugar/m4sh.m4f --mode 777 foo.m4 The definition of the languages is stored in ‘autom4te.cfg’.  File: autoconf.info, Node: Customizing autom4te, Prev: autom4te Invocation, Up: Using autom4te 8.2.2 Customizing ‘autom4te’ ---------------------------- One can customize ‘autom4te’ via ‘~/.autom4te.cfg’ (i.e., as found in the user home directory), and ‘./.autom4te.cfg’ (i.e., as found in the directory from which ‘autom4te’ is run). The order is first reading ‘autom4te.cfg’, then ‘~/.autom4te.cfg’, then ‘./.autom4te.cfg’, and finally the command line arguments. In these text files, comments are introduced with ‘#’, and empty lines are ignored. Customization is performed on a per-language basis, wrapped in between a ‘begin-language: "LANGUAGE"’, ‘end-language: "LANGUAGE"’ pair. Customizing a language stands for appending options (*note autom4te Invocation::) to the current definition of the language. Options, and more generally arguments, are introduced by ‘args: ARGUMENTS’. 
You may use the traditional shell syntax to quote the ARGUMENTS. As an example, to disable Autoconf caches (‘autom4te.cache’) globally, include the following lines in ‘~/.autom4te.cfg’: ## ------------------ ## ## User Preferences. ## ## ------------------ ## begin-language: "Autoconf-without-aclocal-m4" args: --no-cache end-language: "Autoconf-without-aclocal-m4"  File: autoconf.info, Node: Programming in M4sugar, Next: Debugging via autom4te, Prev: Using autom4te, Up: Programming in M4 8.3 Programming in M4sugar ========================== M4 by itself provides only a small, but sufficient, set of all-purpose macros. M4sugar introduces additional generic macros. Its name was coined by Lars J. Aas: “Readability And Greater Understanding Stands 4 M4sugar”. M4sugar reserves the macro namespace ‘^_m4_’ for internal use, and the macro namespace ‘^m4_’ for M4sugar macros. You should not define your own macros into these namespaces. * Menu: * Redefined M4 Macros:: M4 builtins changed in M4sugar * Diagnostic Macros:: Diagnostic messages from M4sugar * Diversion support:: Diversions in M4sugar * Conditional constructs:: Conditions in M4 * Looping constructs:: Iteration in M4 * Evaluation Macros:: More quotation and evaluation control * Text processing Macros:: String manipulation in M4 * Number processing Macros:: Arithmetic computation in M4 * Set manipulation Macros:: Set manipulation in M4 * Forbidden Patterns:: Catching unexpanded macros  File: autoconf.info, Node: Redefined M4 Macros, Next: Diagnostic Macros, Up: Programming in M4sugar 8.3.1 Redefined M4 Macros ------------------------- With a few exceptions, all the M4 native macros are moved in the ‘m4_’ pseudo-namespace, e.g., M4sugar renames ‘define’ as ‘m4_define’ etc. 
The list of macros unchanged from M4, except for their name, is: − m4_builtin − m4_changecom − m4_changequote − m4_debugfile − m4_debugmode − m4_decr − m4_define − m4_divnum − m4_errprint − m4_esyscmd − m4_eval − m4_format − m4_ifdef − m4_incr − m4_index − m4_indir − m4_len − m4_pushdef − m4_shift − m4_substr − m4_syscmd − m4_sysval − m4_traceoff − m4_traceon − m4_translit Some M4 macros are redefined, and are slightly incompatible with their native equivalent. -- Macro: __file__ -- Macro: __line__ All M4 macros starting with ‘__’ retain their original name: for example, no ‘m4__file__’ is defined. -- Macro: __oline__ This is not technically a macro, but a feature of Autom4te. The sequence ‘__oline__’ can be used similarly to the other m4sugar location macros, but rather than expanding to the location of the input file, it is translated to the line number where it appears in the output file after all other M4 expansions. -- Macro: dnl This macro kept its original name: no ‘m4_dnl’ is defined. -- Macro: m4_bpatsubst (STRING, REGEXP, [REPLACEMENT]) This macro corresponds to ‘patsubst’. The name ‘m4_patsubst’ is kept for future versions of M4sugar, once GNU M4 2.0 is released and supports extended regular expression syntax. -- Macro: m4_bregexp (STRING, REGEXP, [REPLACEMENT]) This macro corresponds to ‘regexp’. The name ‘m4_regexp’ is kept for future versions of M4sugar, once GNU M4 2.0 is released and supports extended regular expression syntax. -- Macro: m4_copy (SOURCE, DEST) -- Macro: m4_copy_force (SOURCE, DEST) -- Macro: m4_rename (SOURCE, DEST) -- Macro: m4_rename_force (SOURCE, DEST) These macros aren’t directly builtins, but are closely related to ‘m4_pushdef’ and ‘m4_defn’. ‘m4_copy’ and ‘m4_rename’ ensure that DEST is undefined, while ‘m4_copy_force’ and ‘m4_rename_force’ overwrite any existing definition. All four macros then proceed to copy the entire pushdef stack of definitions of SOURCE over to DEST. 
‘m4_copy’ and ‘m4_copy_force’ preserve the source (including in the special case where SOURCE is undefined), while ‘m4_rename’ and ‘m4_rename_force’ undefine the original macro name (making it an error to rename an undefined SOURCE). Note that attempting to invoke a renamed macro might not work, since the macro may have a dependence on helper macros accessed via composition of ‘$0’ but that were not also renamed; likewise, other macros may have a hard-coded dependence on SOURCE and could break if SOURCE has been deleted. On the other hand, it is always safe to rename a macro to temporarily move it out of the way, then rename it back later to restore original semantics. -- Macro: m4_defn (MACRO...) This macro fails if MACRO is not defined, even when using older versions of M4 that did not warn. See ‘m4_undefine’. Unfortunately, in order to support these older versions of M4, there are some situations involving unbalanced quotes where concatenating multiple macros together will work in newer M4 but not in m4sugar; use quadrigraphs to work around this. -- Macro: m4_divert (DIVERSION) M4sugar relies heavily on diversions, so rather than behaving as a primitive, ‘m4_divert’ behaves like: m4_divert_pop()m4_divert_push([DIVERSION]) *Note Diversion support::, for more details about the use of the diversion stack. In particular, this implies that DIVERSION should be a named diversion rather than a raw number. But be aware that it is seldom necessary to explicitly change the diversion stack, and that when done incorrectly, it can lead to syntactically invalid scripts. -- Macro: m4_dumpdef (NAME...) -- Macro: m4_dumpdefs (NAME...) ‘m4_dumpdef’ is like the M4 builtin, except that this version requires at least one argument, output always goes to standard error rather than the current debug file, no sorting is done on multiple arguments, and an error is issued if any NAME is undefined. 
‘m4_dumpdefs’ is a convenience macro that calls ‘m4_dumpdef’ for all of the ‘m4_pushdef’ stack of definitions, starting with the current, and silently does nothing if NAME is undefined. Unfortunately, due to a limitation in M4 1.4.x, any macro defined as a builtin is output as the empty string. This behavior is rectified by using M4 1.6 or newer. However, this behavior difference means that ‘m4_dumpdef’ should only be used while developing m4sugar macros, and never in the final published form of a macro. -- Macro: m4_esyscmd_s (COMMAND) Like ‘m4_esyscmd’, this macro expands to the result of running COMMAND in a shell. The difference is that any trailing newlines are removed, so that the output behaves more like shell command substitution. -- Macro: m4_exit (EXIT-STATUS) This macro corresponds to ‘m4exit’. -- Macro: m4_if (COMMENT) -- Macro: m4_if (STRING-1, STRING-2, EQUAL, [NOT-EQUAL]) -- Macro: m4_if (STRING-1, STRING-2, EQUAL-1, STRING-3, STRING-4, EQUAL-2, ..., [NOT-EQUAL]) This macro corresponds to ‘ifelse’. STRING-1 and STRING-2 are compared literally, so usually one of the two arguments is passed unquoted. *Note Conditional constructs::, for more conditional idioms. -- Macro: m4_include (FILE) -- Macro: m4_sinclude (FILE) Like the M4 builtins, but warn against multiple inclusions of FILE. -- Macro: m4_mkstemp (TEMPLATE) -- Macro: m4_maketemp (TEMPLATE) Posix requires ‘maketemp’ to replace the trailing ‘X’ characters in TEMPLATE with the process id, without regards to the existence of a file by that name, but this a security hole. When this was pointed out to the Posix folks, they agreed to invent a new macro ‘mkstemp’ that always creates a uniquely named file, but not all versions of GNU M4 support the new macro. In M4sugar, ‘m4_maketemp’ and ‘m4_mkstemp’ are synonyms for each other, and both have the secure semantics regardless of which macro the underlying M4 provides. -- Macro: m4_popdef (MACRO...) 
This macro fails if MACRO is not defined, even when using older versions of M4 that did not warn. See ‘m4_undefine’. -- Macro: m4_undefine (MACRO...) This macro fails if MACRO is not defined, even when using older versions of M4 that did not warn. Use m4_ifdef([MACRO], [m4_undefine([MACRO])]) if you are not sure whether MACRO is defined. -- Macro: m4_undivert (DIVERSION...) Unlike the M4 builtin, at least one DIVERSION must be specified. Also, since the M4sugar diversion stack prefers named diversions, the use of ‘m4_undivert’ to include files is risky. *Note Diversion support::, for more details about the use of the diversion stack. But be aware that it is seldom necessary to explicitly change the diversion stack, and that when done incorrectly, it can lead to syntactically invalid scripts. -- Macro: m4_wrap (TEXT) -- Macro: m4_wrap_lifo (TEXT) These macros correspond to ‘m4wrap’. Posix requires arguments of multiple wrap calls to be reprocessed at EOF in the same order as the original calls (first-in, first-out). GNU M4 versions through 1.4.10, however, reprocess them in reverse order (last-in, first-out). Both orders are useful, therefore, you can rely on ‘m4_wrap’ to provide FIFO semantics and ‘m4_wrap_lifo’ for LIFO semantics, regardless of the underlying GNU M4 version. Unlike the GNU M4 builtin, these macros only recognize one argument, and avoid token pasting between consecutive invocations. On the other hand, nested calls to ‘m4_wrap’ from within wrapped text work just as in the builtin.  File: autoconf.info, Node: Diagnostic Macros, Next: Diversion support, Prev: Redefined M4 Macros, Up: Programming in M4sugar 8.3.2 Diagnostic messages from M4sugar -------------------------------------- When macros statically diagnose abnormal situations, benign or fatal, they should report them using these macros. For issuing dynamic issues, i.e., when ‘configure’ is run, see *note Printing Messages::. 
-- Macro: m4_assert (EXPRESSION, [EXIT-STATUS = ‘1’]) Assert that the arithmetic EXPRESSION evaluates to non-zero. Otherwise, issue a fatal error, and exit ‘autom4te’ with EXIT-STATUS. -- Macro: m4_errprintn (MESSAGE) Similar to the builtin ‘m4_errprint’, except that a newline is guaranteed after MESSAGE. -- Macro: m4_fatal (MESSAGE) Report a severe error MESSAGE prefixed with the current location, and have ‘autom4te’ die. -- Macro: m4_location Useful as a prefix in a message line. Short for: __file__:__line__ -- Macro: m4_warn (CATEGORY, MESSAGE) Report MESSAGE as a warning (or as an error if requested by the user) if warnings of the CATEGORY are turned on. If the message is emitted, it is prefixed with the current location, and followed by a call trace of all macros defined via ‘AC_DEFUN’ used to get to the current expansion. The CATEGORY must be one of: ‘cross’ Warnings about constructs that may interfere with cross-compilation, such as using ‘AC_RUN_IFELSE’ without a default. ‘gnu’ Warnings related to the GNU Coding Standards (*note (standards)Top::). On by default. ‘obsolete’ Warnings about obsolete features. On by default. ‘override’ Warnings about redefinitions of Autoconf internals. ‘portability’ Warnings about non-portable constructs. ‘portability-recursive’ Warnings about recursive Make variable expansions (‘$(foo$(x))’). ‘extra-portability’ Extra warnings about non-portable constructs, covering rarely-used tools. ‘syntax’ Warnings about questionable syntactic constructs, incorrectly ordered macro calls, typos, etc. On by default. ‘unsupported’ Warnings about unsupported features. On by default. *Hacking Note:* The set of categories is defined by code in ‘autom4te’, not by M4sugar itself. Additions should be coordinated with Automake, so that both sets of tools accept the same options.  
File: autoconf.info, Node: Diversion support, Next: Conditional constructs, Prev: Diagnostic Macros, Up: Programming in M4sugar 8.3.3 Diversion support ----------------------- M4sugar makes heavy use of diversions under the hood, because it is often the case that text that must appear early in the output is not discovered until late in the input. Additionally, some of the topological sorting algorithms used in resolving macro dependencies use diversions. However, most macros should not need to change diversions directly, but rather rely on higher-level M4sugar macros to manage diversions transparently. If you change diversions improperly, you risk generating a syntactically invalid script, because an incorrect diversion will violate assumptions made by many macros about whether prerequisite text has been previously output. In short, if you manually change the diversion, you should not expect any macros provided by the Autoconf package to work until you have restored the diversion stack back to its original state. In the rare case that it is necessary to write a macro that explicitly outputs text to a different diversion, it is important to be aware of an M4 limitation regarding diversions: text only goes to a diversion if it is not part of argument collection. Therefore, any macro that changes the current diversion cannot be used as an unquoted argument to another macro, but must be expanded at the top level. The macro ‘m4_expand’ will diagnose any attempt to change diversions, since it is generally useful only as an argument to another macro. 
The following example shows what happens when diversion manipulation is attempted within macro arguments: m4_do([normal text] m4_divert_push([KILL])unwanted[]m4_divert_pop([KILL]) [m4_divert_push([KILL])discarded[]m4_divert_pop([KILL])])dnl ⇒normal text ⇒unwanted Notice that the unquoted text ‘unwanted’ is output, even though it was processed while the current diversion was ‘KILL’, because it was collected as part of the argument to ‘m4_do’. However, the text ‘discarded’ disappeared as desired, because the diversion changes were single-quoted, and were not expanded until the top-level rescan of the output of ‘m4_do’. To make diversion management easier, M4sugar uses the concept of named diversions. Rather than using diversion numbers directly, it is nicer to associate a name with each diversion. The diversion number associated with a particular diversion name is an implementation detail, and a syntax warning is issued if a diversion number is used instead of a name. In general, you should not output text to a named diversion until after calling the appropriate initialization routine for your language (‘m4_init’, ‘AS_INIT’, ‘AT_INIT’, ...), although there are some exceptions documented below. M4sugar defines two named diversions. ‘KILL’ Text written to this diversion is discarded. This is the default diversion once M4sugar is initialized. ‘GROW’ This diversion is used behind the scenes by topological sorting macros, such as ‘AC_REQUIRE’. M4sh adds several more named diversions. ‘BINSH’ This diversion is reserved for the ‘#!’ interpreter line. ‘HEADER-REVISION’ This diversion holds text from ‘AC_REVISION’. ‘HEADER-COMMENT’ This diversion holds comments about the purpose of a file. ‘HEADER-COPYRIGHT’ This diversion is managed by ‘AC_COPYRIGHT’. ‘M4SH-SANITIZE’ This diversion contains M4sh sanitization code, used to ensure M4sh is executing in a reasonable shell environment. 
‘M4SH-INIT’ This diversion contains M4sh initialization code, initializing variables that are required by other M4sh macros. ‘BODY’ This diversion contains the body of the shell code, and is the default diversion once M4sh is initialized. Autotest inherits diversions from M4sh, and changes the default diversion from ‘BODY’ back to ‘KILL’. It also adds several more named diversions, with the following subset designed for developer use. ‘PREPARE_TESTS’ This diversion contains initialization sequences which are executed after ‘atconfig’ and ‘atlocal’, and after all command line arguments have been parsed, but prior to running any tests. It can be used to set up state that is required across all tests. This diversion will work even before ‘AT_INIT’. Autoconf inherits diversions from M4sh, and adds the following named diversions which developers can utilize. ‘DEFAULTS’ This diversion contains shell variable assignments to set defaults that must be in place before arguments are parsed. This diversion is placed early enough in ‘configure’ that it is unsafe to expand any autoconf macros into this diversion. ‘HELP_ENABLE’ If ‘AC_PRESERVE_HELP_ORDER’ was used, then text placed in this diversion will be included as part of a quoted here-doc providing all of the ‘--help’ output of ‘configure’ related to options created by ‘AC_ARG_WITH’ and ‘AC_ARG_ENABLE’. ‘INIT_PREPARE’ This diversion occurs after all command line options have been parsed, but prior to the main body of the ‘configure’ script. This diversion is the last chance to insert shell code such as variable assignments or shell function declarations that will used by the expansion of other macros. For now, the remaining named diversions of Autoconf, Autoheader, and Autotest are not documented. In other words, intentionally outputting text into an undocumented diversion is subject to breakage in a future release of Autoconf. -- Macro: m4_cleardivert (DIVERSION...) 
Permanently discard any text that has been diverted into DIVERSION. -- Macro: m4_divert_once (DIVERSION, [CONTENT]) Similar to ‘m4_divert_text’, except that CONTENT is only output to DIVERSION if this is the first time that ‘m4_divert_once’ has been called with its particular arguments. -- Macro: m4_divert_pop ([DIVERSION]) If provided, check that the current diversion is indeed DIVERSION. Then change to the diversion located earlier on the stack, giving an error if an attempt is made to pop beyond the initial m4sugar diversion of ‘KILL’. -- Macro: m4_divert_push (DIVERSION) Remember the former diversion on the diversion stack, and output subsequent text into DIVERSION. M4sugar maintains a diversion stack, and issues an error if there is not a matching pop for every push. -- Macro: m4_divert_text (DIVERSION, [CONTENT]) Output CONTENT and a newline into DIVERSION, without affecting the current diversion. Shorthand for: m4_divert_push([DIVERSION])CONTENT m4_divert_pop([DIVERSION])dnl One use of ‘m4_divert_text’ is to develop two related macros, where macro ‘MY_A’ does the work, but adjusts what work is performed based on whether the optional macro ‘MY_B’ has also been expanded. Of course, it is possible to use ‘AC_BEFORE’ within ‘MY_A’ to require that ‘MY_B’ occurs first, if it occurs at all. But this imposes an ordering restriction on the user; it would be nicer if macros ‘MY_A’ and ‘MY_B’ can be invoked in either order. The trick is to let ‘MY_B’ leave a breadcrumb in an early diversion, which ‘MY_A’ can then use to determine whether ‘MY_B’ has been expanded. AC_DEFUN([MY_A], [# various actions if test -n "$b_was_used"; then # extra action fi]) AC_DEFUN([MY_B], [AC_REQUIRE([MY_A])dnl m4_divert_text([INIT_PREPARE], [b_was_used=true])]) -- Macro: m4_init Initialize the M4sugar environment, setting up the default named diversion to be ‘KILL’.  
File: autoconf.info, Node: Conditional constructs, Next: Looping constructs, Prev: Diversion support, Up: Programming in M4sugar 8.3.4 Conditional constructs ---------------------------- The following macros provide additional conditional constructs as convenience wrappers around ‘m4_if’. -- Macro: m4_bmatch (STRING, REGEX-1, VALUE-1, [REGEX-2], [VALUE-2], ..., [DEFAULT]) The string STRING is repeatedly compared against a series of REGEX arguments; if a match is found, the expansion is the corresponding VALUE, otherwise, the macro moves on to the next REGEX. If no REGEX match, then the result is the optional DEFAULT, or nothing. -- Macro: m4_bpatsubsts (STRING, REGEX-1, SUBST-1, [REGEX-2], [SUBST-2], ...) The string STRING is altered by REGEX-1 and SUBST-1, as if by: m4_bpatsubst([[STRING]], [REGEX], [SUBST]) The result of the substitution is then passed through the next set of REGEX and SUBST, and so forth. An empty SUBST implies deletion of any matched portions in the current string. Note that this macro over-quotes STRING; this behavior is intentional, so that the result of each step of the recursion remains as a quoted string. However, it means that anchors (‘^’ and ‘$’ in the REGEX will line up with the extra quotations, and not the characters of the original string. The overquoting is removed after the final substitution. -- Macro: m4_case (STRING, VALUE-1, IF-VALUE-1, [VALUE-2], [IF-VALUE-2], ..., [DEFAULT]) Test STRING against multiple VALUE possibilities, resulting in the first IF-VALUE for a match, or in the optional DEFAULT. This is shorthand for: m4_if([STRING], [VALUE-1], [IF-VALUE-1], [STRING], [VALUE-2], [IF-VALUE-2], ..., [DEFAULT]) -- Macro: m4_cond (TEST-1, VALUE-1, IF-VALUE-1, [TEST-2], [VALUE-2], [IF-VALUE-2], ..., [DEFAULT]) This macro was introduced in Autoconf 2.62. Similar to ‘m4_if’, except that each TEST is expanded only when it is encountered. 
This is useful for short-circuiting expensive tests; while ‘m4_if’ requires all its strings to be expanded up front before doing comparisons, ‘m4_cond’ only expands a TEST when all earlier tests have failed. For an example, these two sequences give the same result, but in the case where ‘$1’ does not contain a backslash, the ‘m4_cond’ version only expands ‘m4_index’ once, instead of five times, for faster computation if this is a common case for ‘$1’. Notice that every third argument is unquoted for ‘m4_if’, and quoted for ‘m4_cond’: m4_if(m4_index([$1], [\]), [-1], [$2], m4_eval(m4_index([$1], [\\]) >= 0), [1], [$2], m4_eval(m4_index([$1], [\$]) >= 0), [1], [$2], m4_eval(m4_index([$1], [\`]) >= 0), [1], [$3], m4_eval(m4_index([$1], [\"]) >= 0), [1], [$3], [$2]) m4_cond([m4_index([$1], [\])], [-1], [$2], [m4_eval(m4_index([$1], [\\]) >= 0)], [1], [$2], [m4_eval(m4_index([$1], [\$]) >= 0)], [1], [$2], [m4_eval(m4_index([$1], [\`]) >= 0)], [1], [$3], [m4_eval(m4_index([$1], [\"]) >= 0)], [1], [$3], [$2]) -- Macro: m4_default (EXPR-1, EXPR-2) -- Macro: m4_default_quoted (EXPR-1, EXPR-2) -- Macro: m4_default_nblank (EXPR-1, [EXPR-2]) -- Macro: m4_default_nblank_quoted (EXPR-1, [EXPR-2]) If EXPR-1 contains text, use it. Otherwise, select EXPR-2. ‘m4_default’ expands the result, while ‘m4_default_quoted’ does not. Useful for providing a fixed default if the expression that results in EXPR-1 would otherwise be empty. The difference between ‘m4_default’ and ‘m4_default_nblank’ is whether an argument consisting of just blanks (space, tab, newline) is significant. When using the expanding versions, note that an argument may contain text but still expand to an empty string. 
m4_define([active], [ACTIVE])dnl m4_define([empty], [])dnl m4_define([demo1], [m4_default([$1], [$2])])dnl m4_define([demo2], [m4_default_quoted([$1], [$2])])dnl m4_define([demo3], [m4_default_nblank([$1], [$2])])dnl m4_define([demo4], [m4_default_nblank_quoted([$1], [$2])])dnl demo1([active], [default]) ⇒ACTIVE demo1([], [active]) ⇒ACTIVE demo1([empty], [text]) ⇒ -demo1([ ], [active])- ⇒- - demo2([active], [default]) ⇒active demo2([], [active]) ⇒active demo2([empty], [text]) ⇒empty -demo2([ ], [active])- ⇒- - demo3([active], [default]) ⇒ACTIVE demo3([], [active]) ⇒ACTIVE demo3([empty], [text]) ⇒ -demo3([ ], [active])- ⇒-ACTIVE- demo4([active], [default]) ⇒active demo4([], [active]) ⇒active demo4([empty], [text]) ⇒empty -demo4([ ], [active])- ⇒-active- -- Macro: m4_define_default (MACRO, [DEFAULT-DEFINITION]) If MACRO does not already have a definition, then define it to DEFAULT-DEFINITION. -- Macro: m4_ifblank (COND, [IF-BLANK], [IF-TEXT]) -- Macro: m4_ifnblank (COND, [IF-TEXT], [IF-BLANK]) If COND is empty or consists only of blanks (space, tab, newline), then expand IF-BLANK; otherwise, expand IF-TEXT. Two variants exist, in order to make it easier to select the correct logical sense when using only two parameters. Note that this is more efficient than the equivalent behavior of: m4_ifval(m4_normalize([COND]), IF-TEXT, IF-BLANK) -- Macro: m4_ifndef (MACRO, IF-NOT-DEFINED, [IF-DEFINED]) This is shorthand for: m4_ifdef([MACRO], [IF-DEFINED], [IF-NOT-DEFINED]) -- Macro: m4_ifset (MACRO, [IF-TRUE], [IF-FALSE]) If MACRO is undefined, or is defined as the empty string, expand to IF-FALSE. Otherwise, expands to IF-TRUE. Similar to: m4_ifval(m4_defn([MACRO]), [IF-TRUE], [IF-FALSE]) except that it is not an error if MACRO is undefined. -- Macro: m4_ifval (COND, [IF-TRUE], [IF-FALSE]) Expands to IF-TRUE if COND is not empty, otherwise to IF-FALSE. 
This is shorthand for: m4_if([COND], [], [IF-FALSE], [IF-TRUE]) -- Macro: m4_ifvaln (COND, [IF-TRUE], [IF-FALSE]) Similar to ‘m4_ifval’, except guarantee that a newline is present after any non-empty expansion. Often followed by ‘dnl’. -- Macro: m4_n (TEXT) Expand to TEXT, and add a newline if TEXT is not empty. Often followed by ‘dnl’.  File: autoconf.info, Node: Looping constructs, Next: Evaluation Macros, Prev: Conditional constructs, Up: Programming in M4sugar 8.3.5 Looping constructs ------------------------ The following macros are useful in implementing recursive algorithms in M4, including loop operations. An M4 list is formed by quoting a list of quoted elements; generally the lists are comma-separated, although ‘m4_foreach_w’ is whitespace-separated. For example, the list ‘[[a], [b,c]]’ contains two elements: ‘[a]’ and ‘[b,c]’. It is common to see lists with unquoted elements when those elements are not likely to be macro names, as in ‘[fputc_unlocked, fgetc_unlocked]’. Although not generally recommended, it is possible for quoted lists to have side effects; all side effects are expanded only once, and prior to visiting any list element. On the other hand, the fact that unquoted macros are expanded exactly once means that macros without side effects can be used to generate lists. For example, m4_foreach([i], [[1], [2], [3]m4_errprintn([hi])], [i]) error→hi ⇒123 m4_define([list], [[1], [2], [3]]) ⇒ m4_foreach([i], [list], [i]) ⇒123 -- Macro: m4_argn (N, [ARG]...) Extracts argument N (larger than 0) from the remaining arguments. If there are too few arguments, the empty string is used. For any N besides 1, this is more efficient than the similar ‘m4_car(m4_shiftn([N], [], [ARG...]))’. -- Macro: m4_car (ARG...) Expands to the quoted first ARG. Can be used with ‘m4_cdr’ to recursively iterate through a list. Generally, when using quoted lists of quoted elements, ‘m4_car’ should be called without any extra quotes. -- Macro: m4_cdr (ARG...) 
Expands to a quoted list of all but the first ARG, or the empty string if there was only one argument. Generally, when using quoted lists of quoted elements, ‘m4_cdr’ should be called without any extra quotes. For example, this is a simple implementation of ‘m4_map’; note how each iteration checks for the end of recursion, then merely applies the first argument to the first element of the list, then repeats with the rest of the list. (The actual implementation in M4sugar is a bit more involved, to gain some speed and share code with ‘m4_map_sep’, and also to avoid expanding side effects in ‘$2’ twice). m4_define([m4_map], [m4_ifval([$2], [m4_apply([$1], m4_car($2))[]$0([$1], m4_cdr($2))])])dnl m4_map([ m4_eval], [[[1]], [[1+1]], [[10],[16]]]) ⇒ 1 2 a -- Macro: m4_for (VAR, FIRST, LAST, [STEP], EXPRESSION) Loop over the numeric values between FIRST and LAST including bounds by increments of STEP. For each iteration, expand EXPRESSION with the numeric value assigned to VAR. If STEP is omitted, it defaults to ‘1’ or ‘-1’ depending on the order of the limits. If given, STEP has to match this order. The number of iterations is determined independently from definition of VAR; iteration cannot be short-circuited or lengthened by modifying VAR from within EXPRESSION. -- Macro: m4_foreach (VAR, LIST, EXPRESSION) Loop over the comma-separated M4 list LIST, assigning each value to VAR, and expand EXPRESSION. The following example outputs two lines: m4_foreach([myvar], [[foo], [bar, baz]], [echo myvar ])dnl ⇒echo foo ⇒echo bar, baz Note that for some forms of EXPRESSION, it may be faster to use ‘m4_map_args’. -- Macro: m4_foreach_w (VAR, LIST, EXPRESSION) Loop over the white-space-separated list LIST, assigning each value to VAR, and expand EXPRESSION. If VAR is only referenced once in EXPRESSION, it is more efficient to use ‘m4_map_args_w’. The deprecated macro ‘AC_FOREACH’ is an alias of ‘m4_foreach_w’. 
-- Macro: m4_map (MACRO, LIST) -- Macro: m4_mapall (MACRO, LIST) -- Macro: m4_map_sep (MACRO, SEPARATOR, LIST) -- Macro: m4_mapall_sep (MACRO, SEPARATOR, LIST) Loop over the comma separated quoted list of argument descriptions in LIST, and invoke MACRO with the arguments. An argument description is in turn a comma-separated quoted list of quoted elements, suitable for ‘m4_apply’. The macros ‘m4_map’ and ‘m4_map_sep’ ignore empty argument descriptions, while ‘m4_mapall’ and ‘m4_mapall_sep’ invoke MACRO with no arguments. The macros ‘m4_map_sep’ and ‘m4_mapall_sep’ additionally expand SEPARATOR between invocations of MACRO. Note that SEPARATOR is expanded, unlike in ‘m4_join’. When separating output with commas, this means that the map result can be used as a series of arguments, by using a single-quoted comma as SEPARATOR, or as a single string, by using a double-quoted comma. m4_map([m4_count], []) ⇒ m4_map([ m4_count], [[], [[1]], [[1], [2]]]) ⇒ 1 2 m4_mapall([ m4_count], [[], [[1]], [[1], [2]]]) ⇒ 0 1 2 m4_map_sep([m4_eval], [,], [[[1+2]], [[10], [16]]]) ⇒3,a m4_map_sep([m4_echo], [,], [[[a]], [[b]]]) ⇒a,b m4_count(m4_map_sep([m4_echo], [,], [[[a]], [[b]]])) ⇒2 m4_map_sep([m4_echo], [[,]], [[[a]], [[b]]]) ⇒a,b m4_count(m4_map_sep([m4_echo], [[,]], [[[a]], [[b]]])) ⇒1 -- Macro: m4_map_args (MACRO, ARG...) Repeatedly invoke MACRO with each successive ARG as its only argument. In the following example, three solutions are presented with the same expansion; the solution using ‘m4_map_args’ is the most efficient. m4_define([active], [ACTIVE])dnl m4_foreach([var], [[plain], [active]], [ m4_echo(m4_defn([var]))]) ⇒ plain active m4_map([ m4_echo], [[[plain]], [[active]]]) ⇒ plain active m4_map_args([ m4_echo], [plain], [active]) ⇒ plain active In cases where it is useful to operate on additional parameters besides the list elements, the macro ‘m4_curry’ can be used in MACRO to supply the argument currying necessary to generate the desired argument list. 
In the following example, ‘list_add_n’ is more efficient than ‘list_add_x’. On the other hand, using ‘m4_map_args_sep’ can be even more efficient. m4_define([list], [[1], [2], [3]])dnl m4_define([add], [m4_eval(([$1]) + ([$2]))])dnl dnl list_add_n(N, ARG...) dnl Output a list consisting of each ARG added to N m4_define([list_add_n], [m4_shift(m4_map_args([,m4_curry([add], [$1])], m4_shift($@)))])dnl list_add_n([1], list) ⇒2,3,4 list_add_n([2], list) ⇒3,4,5 m4_define([list_add_x], [m4_shift(m4_foreach([var], m4_dquote(m4_shift($@)), [,add([$1],m4_defn([var]))]))])dnl list_add_x([1], list) ⇒2,3,4 -- Macro: m4_map_args_pair (MACRO, [MACRO-END = MACRO] ARG...) For every pair of arguments ARG, invoke MACRO with two arguments. If there is an odd number of arguments, invoke MACRO-END, which defaults to MACRO, with the remaining argument. m4_map_args_pair([, m4_reverse], [], [1], [2], [3]) ⇒, 2, 1, 3 m4_map_args_pair([, m4_reverse], [, m4_dquote], [1], [2], [3]) ⇒, 2, 1, [3] m4_map_args_pair([, m4_reverse], [, m4_dquote], [1], [2], [3], [4]) ⇒, 2, 1, 4, 3 -- Macro: m4_map_args_sep ([PRE], [POST], [SEP], ARG...) Expand the sequence ‘PRE[ARG]POST’ for each argument, additionally expanding SEP between arguments. One common use of this macro is constructing a macro call, where the opening and closing parentheses are split between PRE and POST; in particular, ‘m4_map_args([MACRO], [ARG])’ is equivalent to ‘m4_map_args_sep([MACRO(], [)], [], [ARG])’. This macro provides the most efficient means for iterating over an arbitrary list of arguments, particularly when repeatedly constructing a macro call with more arguments than ARG. -- Macro: m4_map_args_w (STRING, [PRE], [POST], [SEP]) Expand the sequence ‘PRE[word]POST’ for each word in the whitespace-separated STRING, additionally expanding SEP between words. This macro provides the most efficient means for iterating over a whitespace-separated string. 
In particular, ‘m4_map_args_w([STRING], [ACTION(], [)])’ is more efficient than ‘m4_foreach_w([var], [STRING], [ACTION(m4_defn([var]))])’. -- Macro: m4_shiftn (COUNT, ...) -- Macro: m4_shift2 (...) -- Macro: m4_shift3 (...) ‘m4_shiftn’ performs COUNT iterations of ‘m4_shift’, along with validation that enough arguments were passed in to match the shift count, and that the count is positive. ‘m4_shift2’ and ‘m4_shift3’ are specializations of ‘m4_shiftn’, introduced in Autoconf 2.62, and are more efficient for two and three shifts, respectively. -- Macro: m4_stack_foreach (MACRO, ACTION) -- Macro: m4_stack_foreach_lifo (MACRO, ACTION) For each of the ‘m4_pushdef’ definitions of MACRO, expand ACTION with the single argument of a definition of MACRO. ‘m4_stack_foreach’ starts with the oldest definition, while ‘m4_stack_foreach_lifo’ starts with the current definition. ACTION should not push or pop definitions of MACRO, nor is there any guarantee that the current definition of MACRO matches the argument that was passed to ACTION. The macro ‘m4_curry’ can be used if ACTION needs more than one argument, although in that case it is more efficient to use M4_STACK_FOREACH_SEP. Due to technical limitations, there are a few low-level m4sugar functions, such as ‘m4_pushdef’, that cannot be used as the MACRO argument. m4_pushdef([a], [1])m4_pushdef([a], [2])dnl m4_stack_foreach([a], [ m4_incr]) ⇒ 2 3 m4_stack_foreach_lifo([a], [ m4_curry([m4_substr], [abcd])]) ⇒ cd bcd -- Macro: m4_stack_foreach_sep (MACRO, [PRE], [POST], [SEP]) -- Macro: m4_stack_foreach_sep_lifo (MACRO, [PRE], [POST], [SEP]) Expand the sequence ‘PRE[definition]POST’ for each ‘m4_pushdef’ definition of MACRO, additionally expanding SEP between definitions. ‘m4_stack_foreach_sep’ visits the oldest definition first, while ‘m4_stack_foreach_sep_lifo’ visits the current definition first. This macro provides the most efficient means for iterating over a pushdef stack. 
In particular, ‘m4_stack_foreach([MACRO], [ACTION])’ is short for ‘m4_stack_foreach_sep([MACRO], [ACTION(], [)])’.  File: autoconf.info, Node: Evaluation Macros, Next: Text processing Macros, Prev: Looping constructs, Up: Programming in M4sugar 8.3.6 Evaluation Macros ----------------------- The following macros give some control over the order of the evaluation by adding or removing levels of quotes. -- Macro: m4_apply (MACRO, LIST) Apply the elements of the quoted, comma-separated LIST as the arguments to MACRO. If LIST is empty, invoke MACRO without arguments. Note the difference between ‘m4_indir’, which expects its first argument to be a macro name but can use names that are otherwise invalid, and ‘m4_apply’, where MACRO can contain other text, but must end in a valid macro name. m4_apply([m4_count], []) ⇒0 m4_apply([m4_count], [[]]) ⇒1 m4_apply([m4_count], [[1], [2]]) ⇒2 m4_apply([m4_join], [[|], [1], [2]]) ⇒1|2 -- Macro: m4_count (ARG, ...) This macro returns the decimal count of the number of arguments it was passed. -- Macro: m4_curry (MACRO, ARG...) This macro performs argument currying. The expansion of this macro is another macro name that expects exactly one argument; that argument is then appended to the ARG list, and then MACRO is expanded with the resulting argument list. m4_curry([m4_curry], [m4_reverse], [1])([2])([3]) ⇒3, 2, 1 Unfortunately, due to a limitation in M4 1.4.x, it is not possible to pass the definition of a builtin macro as the argument to the output of ‘m4_curry’; the empty string is used instead of the builtin token. This behavior is rectified by using M4 1.6 or newer. -- Macro: m4_do (ARG, ...) This macro loops over its arguments and expands each ARG in sequence. Its main use is for readability; it allows the use of indentation and fewer ‘dnl’ to result in the same expansion. This macro guarantees that no expansion will be concatenated with subsequent text; to achieve full concatenation, use ‘m4_unquote(m4_join([], ARG...))’. 
m4_define([ab],[1])m4_define([bc],[2])m4_define([abc],[3])dnl m4_do([a],[b])c ⇒abc m4_unquote(m4_join([],[a],[b]))c ⇒3 m4_define([a],[A])m4_define([b],[B])m4_define([c],[C])dnl m4_define([AB],[4])m4_define([BC],[5])m4_define([ABC],[6])dnl m4_do([a],[b])c ⇒ABC m4_unquote(m4_join([],[a],[b]))c ⇒3 -- Macro: m4_dquote (ARG, ...) Return the arguments as a quoted list of quoted arguments. Conveniently, if there is just one ARG, this effectively adds a level of quoting. -- Macro: m4_dquote_elt (ARG, ...) Return the arguments as a series of double-quoted arguments. Whereas ‘m4_dquote’ returns a single argument, ‘m4_dquote_elt’ returns as many arguments as it was passed. -- Macro: m4_echo (ARG, ...) Return the arguments, with the same level of quoting. Other than discarding whitespace after unquoted commas, this macro is a no-op. -- Macro: m4_expand (ARG) Return the expansion of ARG as a quoted string. Whereas ‘m4_quote’ is designed to collect expanded text into a single argument, ‘m4_expand’ is designed to perform one level of expansion on quoted text. One distinction is in the treatment of whitespace following a comma in the original ARG. Any time multiple arguments are collected into one with ‘m4_quote’, the M4 argument collection rules discard the whitespace. However, with ‘m4_expand’, whitespace is preserved, even after the expansion of macros contained in ARG. Additionally, ‘m4_expand’ is able to expand text that would involve an unterminated comment, whereas expanding that same text as the argument to ‘m4_quote’ runs into difficulty in finding the end of the argument. Since manipulating diversions during argument collection is inherently unsafe, ‘m4_expand’ issues an error if ARG attempts to change the current diversion (*note Diversion support::). 
m4_define([active], [ACT, IVE])dnl m4_define([active2], [[ACT, IVE]])dnl m4_quote(active, active) ⇒ACT,IVE,ACT,IVE m4_expand([active, active]) ⇒ACT, IVE, ACT, IVE m4_quote(active2, active2) ⇒ACT, IVE,ACT, IVE m4_expand([active2, active2]) ⇒ACT, IVE, ACT, IVE m4_expand([# m4_echo]) ⇒# m4_echo m4_quote(# m4_echo) ) ⇒# m4_echo) ⇒ Note that ‘m4_expand’ cannot handle an ARG that expands to literal unbalanced quotes, but that quadrigraphs can be used when unbalanced output is necessary. Likewise, unbalanced parentheses should be supplied with double quoting or a quadrigraph. m4_define([pattern], [[!@<:@]])dnl m4_define([bar], [BAR])dnl m4_expand([case $foo in m4_defn([pattern])@:}@ bar ;; *[)] blah ;; esac]) ⇒case $foo in ⇒ [![]) BAR ;; ⇒ *) blah ;; ⇒esac -- Macro: m4_ignore (...) This macro was introduced in Autoconf 2.62. Expands to nothing, ignoring all of its arguments. By itself, this isn’t very useful. However, it can be used to conditionally ignore an arbitrary number of arguments, by deciding which macro name to apply to a list of arguments. dnl foo outputs a message only if [debug] is defined. m4_define([foo], [m4_ifdef([debug],[AC_MSG_NOTICE],[m4_ignore])([debug message])]) Note that for earlier versions of Autoconf, the macro ‘__gnu__’ can serve the same purpose, although it is less readable. -- Macro: m4_make_list (ARG, ...) This macro exists to aid debugging of M4sugar algorithms. Its net effect is similar to ‘m4_dquote’—it produces a quoted list of quoted arguments, for each ARG. The difference is that this version uses a comma-newline separator instead of just comma, to improve readability of the list; with the result that it is less efficient than ‘m4_dquote’. 
m4_define([zero],[0])m4_define([one],[1])m4_define([two],[2])dnl m4_dquote(zero, [one], [[two]]) ⇒[0],[one],[[two]] m4_make_list(zero, [one], [[two]]) ⇒[0], ⇒[one], ⇒[[two]] m4_foreach([number], m4_dquote(zero, [one], [[two]]), [ number]) ⇒ 0 1 two m4_foreach([number], m4_make_list(zero, [one], [[two]]), [ number]) ⇒ 0 1 two -- Macro: m4_quote (ARG, ...) Return the arguments as a single entity, i.e., wrap them into a pair of quotes. This effectively collapses multiple arguments into one, although it loses whitespace after unquoted commas in the process. -- Macro: m4_reverse (ARG, ...) Outputs each argument with the same level of quoting, but in reverse order, and with space following each comma for readability. m4_define([active], [ACT,IVE]) ⇒ m4_reverse(active, [active]) ⇒active, IVE, ACT -- Macro: m4_unquote (ARG, ...) This macro was introduced in Autoconf 2.62. Expand each argument, separated by commas. For a single ARG, this effectively removes a layer of quoting, and ‘m4_unquote([ARG])’ is more efficient than the equivalent ‘m4_do([ARG])’. For multiple arguments, this results in an unquoted list of expansions. This is commonly used with ‘m4_split’, in order to convert a single quoted list into a series of quoted elements. The following example aims at emphasizing the difference between several scenarios: not using these macros, using ‘m4_defn’, using ‘m4_quote’, using ‘m4_dquote’, and using ‘m4_expand’. $ cat example.m4 dnl Overquote, so that quotes are visible. 
m4_define([show], [$[]1 = [$1], $[]@ = [$@]]) m4_define([a], [A]) m4_define([mkargs], [1, 2[,] 3]) m4_define([arg1], [[$1]]) m4_divert([0])dnl show(a, b) show([a, b]) show(m4_quote(a, b)) show(m4_dquote(a, b)) show(m4_expand([a, b])) arg1(mkargs) arg1([mkargs]) arg1(m4_defn([mkargs])) arg1(m4_quote(mkargs)) arg1(m4_dquote(mkargs)) arg1(m4_expand([mkargs])) $ autom4te -l m4sugar example.m4 $1 = A, $@ = [A],[b] $1 = a, b, $@ = [a, b] $1 = A,b, $@ = [A,b] $1 = [A],[b], $@ = [[A],[b]] $1 = A, b, $@ = [A, b] 1 mkargs 1, 2[,] 3 1,2, 3 [1],[2, 3] 1, 2, 3  File: autoconf.info, Node: Text processing Macros, Next: Number processing Macros, Prev: Evaluation Macros, Up: Programming in M4sugar 8.3.7 String manipulation in M4 ------------------------------- The following macros may be used to manipulate strings in M4. Many of the macros in this section intentionally result in quoted strings as output, rather than subjecting the arguments to further expansions. As a result, if you are manipulating text that contains active M4 characters, the arguments are passed with single quoting rather than double. -- Macro: m4_append (MACRO-NAME, STRING, [SEPARATOR]) -- Macro: m4_append_uniq (MACRO-NAME, STRING, [SEPARATOR] [IF-UNIQ], [IF-DUPLICATE]) Redefine MACRO-NAME to its former contents with SEPARATOR and STRING added at the end. If MACRO-NAME was undefined before (but not if it was defined but empty), then no SEPARATOR is added. As of Autoconf 2.62, neither STRING nor SEPARATOR are expanded during this macro; instead, they are expanded when MACRO-NAME is invoked. ‘m4_append’ can be used to grow strings, and ‘m4_append_uniq’ to grow strings without duplicating substrings. Additionally, ‘m4_append_uniq’ takes two optional parameters as of Autoconf 2.62; IF-UNIQ is expanded if STRING was appended, and IF-DUPLICATE is expanded if STRING was already present. Also, ‘m4_append_uniq’ warns if SEPARATOR is not empty, but occurs within STRING, since that can lead to duplicates. 
Note that ‘m4_append’ can scale linearly in the length of the final string, depending on the quality of the underlying M4 implementation, while ‘m4_append_uniq’ has an inherent quadratic scaling factor. If an algorithm can tolerate duplicates in the final string, use the former for speed. If duplicates must be avoided, consider using ‘m4_set_add’ instead (*note Set manipulation Macros::). m4_define([active], [ACTIVE])dnl m4_append([sentence], [This is an])dnl m4_append([sentence], [ active ])dnl m4_append([sentence], [symbol.])dnl sentence ⇒This is an ACTIVE symbol. m4_undefine([active])dnl ⇒This is an active symbol. m4_append_uniq([list], [one], [, ], [new], [existing]) ⇒new m4_append_uniq([list], [one], [, ], [new], [existing]) ⇒existing m4_append_uniq([list], [two], [, ], [new], [existing]) ⇒new m4_append_uniq([list], [three], [, ], [new], [existing]) ⇒new m4_append_uniq([list], [two], [, ], [new], [existing]) ⇒existing list ⇒one, two, three m4_dquote(list) ⇒[one],[two],[three] m4_append([list2], [one], [[, ]])dnl m4_append_uniq([list2], [two], [[, ]])dnl m4_append([list2], [three], [[, ]])dnl list2 ⇒one, two, three m4_dquote(list2) ⇒[one, two, three] -- Macro: m4_append_uniq_w (MACRO-NAME, STRINGS) This macro was introduced in Autoconf 2.62. It is similar to ‘m4_append_uniq’, but treats STRINGS as a whitespace separated list of words to append, and only appends unique words. MACRO-NAME is updated with a single space between new words. m4_append_uniq_w([numbers], [1 1 2])dnl m4_append_uniq_w([numbers], [ 2 3 ])dnl numbers ⇒1 2 3 -- Macro: m4_chomp (STRING) -- Macro: m4_chomp_all (STRING) Output STRING in quotes, but without a trailing newline. The macro ‘m4_chomp’ is slightly faster, and removes at most one newline; the macro ‘m4_chomp_all’ removes all consecutive trailing newlines. Unlike ‘m4_flatten’, embedded newlines are left intact, and backslash does not influence the result. 
-- Macro: m4_combine ([SEPARATOR], PREFIX-LIST, [INFIX], SUFFIX-1, [SUFFIX-2], ...) This macro produces a quoted string containing the pairwise combination of every element of the quoted, comma-separated PREFIX-LIST, and every element from the SUFFIX arguments. Each pairwise combination is joined with INFIX in the middle, and successive pairs are joined by SEPARATOR. No expansion occurs on any of the arguments. No output occurs if either the PREFIX or SUFFIX list is empty, but the lists can contain empty elements. m4_define([a], [oops])dnl m4_combine([, ], [[a], [b], [c]], [-], [1], [2], [3]) ⇒a-1, a-2, a-3, b-1, b-2, b-3, c-1, c-2, c-3 m4_combine([, ], [[a], [b]], [-]) ⇒ m4_combine([, ], [[a], [b]], [-], []) ⇒a-, b- m4_combine([, ], [], [-], [1], [2]) ⇒ m4_combine([, ], [[]], [-], [1], [2]) ⇒-1, -2 -- Macro: m4_escape (STRING) Convert all instances of ‘[’, ‘]’, ‘#’, and ‘$’ within STRING into their respective quadrigraphs. The result is still a quoted string. -- Macro: m4_flatten (STRING) Flatten STRING into a single line. Delete all backslash-newline pairs, and replace all remaining newlines with a space. The result is still a quoted string. -- Macro: m4_join ([SEPARATOR], ARGS...) -- Macro: m4_joinall ([SEPARATOR], ARGS...) Concatenate each ARG, separated by SEPARATOR. ‘joinall’ uses every argument, while ‘join’ omits empty arguments so that there are no back-to-back separators in the output. The result is a quoted string. m4_define([active], [ACTIVE])dnl m4_join([|], [one], [], [active], [two]) ⇒one|active|two m4_joinall([|], [one], [], [active], [two]) ⇒one||active|two Note that if all you intend to do is join ARGS with commas between them, to form a quoted list suitable for ‘m4_foreach’, it is more efficient to use ‘m4_dquote’. -- Macro: m4_newline ([TEXT]) This macro was introduced in Autoconf 2.62, and expands to a newline, followed by any TEXT. 
It is primarily useful for maintaining macro formatting, and ensuring that M4 does not discard leading whitespace during argument collection. -- Macro: m4_normalize (STRING) Remove leading and trailing spaces and tabs, sequences of backslash-then-newline, and replace multiple spaces, tabs, and newlines with a single space. This is a combination of ‘m4_flatten’ and ‘m4_strip’. To determine if STRING consists only of bytes that would be removed by ‘m4_normalize’, you can use ‘m4_ifblank’. -- Macro: m4_re_escape (STRING) Backslash-escape all characters in STRING that are active in regexps. -- Macro: m4_split (STRING, [REGEXP = ‘[\t ]+’]) Split STRING into an M4 list of elements quoted by ‘[’ and ‘]’, while keeping white space at the beginning and at the end. If REGEXP is given, use it instead of ‘[\t ]+’ for splitting. If STRING is empty, the result is an empty list. -- Macro: m4_strip (STRING) Strip whitespace from STRING. Sequences of spaces and tabs are reduced to a single space, then leading and trailing spaces are removed. The result is still a quoted string. Note that this does not interfere with newlines; if you want newlines stripped as well, consider ‘m4_flatten’, or do it all at once with ‘m4_normalize’. To quickly test if STRING has only whitespace, use ‘m4_ifblank’. -- Macro: m4_text_box (MESSAGE, [FRAME = ‘-’]) Add a text box around MESSAGE, using FRAME as the border character above and below the message. The FRAME argument must be a single byte, and does not support quadrigraphs. The frame correctly accounts for the subsequent expansion of MESSAGE. For example: m4_define([macro], [abc])dnl m4_text_box([macro]) ⇒## --- ## ⇒## abc ## ⇒## --- ## The MESSAGE must contain balanced quotes and parentheses, although quadrigraphs can be used to work around this. 
-- Macro: m4_text_wrap (STRING, [PREFIX], [PREFIX1 = PREFIX], [WIDTH = ‘79’]) Break STRING into a series of whitespace-separated words, then output those words separated by spaces, and wrapping lines any time the output would exceed WIDTH columns. If given, PREFIX1 begins the first line, and PREFIX begins all wrapped lines. If PREFIX1 is longer than PREFIX, then the first line consists of just PREFIX1. If PREFIX is longer than PREFIX1, padding is inserted so that the first word of STRING begins at the same indentation as all wrapped lines. Note that using literal tab characters in any of the arguments will interfere with the calculation of width. No expansions occur on PREFIX, PREFIX1, or the words of STRING, although quadrigraphs are recognized. For some examples: m4_text_wrap([Short string */], [ ], [/* ], [20]) ⇒/* Short string */ m4_text_wrap([Much longer string */], [ ], [/* ], [20]) ⇒/* Much longer ⇒ string */ m4_text_wrap([Short doc.], [ ], [ --short ], [30]) ⇒ --short Short doc. m4_text_wrap([Short doc.], [ ], [ --too-wide ], [30]) ⇒ --too-wide ⇒ Short doc. m4_text_wrap([Super long documentation.], [ ], [ --too-wide ], 30) ⇒ --too-wide ⇒ Super long ⇒ documentation. -- Macro: m4_tolower (STRING) -- Macro: m4_toupper (STRING) Return STRING with letters converted to upper or lower case, respectively.  File: autoconf.info, Node: Number processing Macros, Next: Set manipulation Macros, Prev: Text processing Macros, Up: Programming in M4sugar 8.3.8 Arithmetic computation in M4 ---------------------------------- The following macros facilitate integer arithmetic operations. Where a parameter is documented as taking an arithmetic expression, you can use anything that can be parsed by ‘m4_eval’. -- Macro: m4_cmp (EXPR-1, EXPR-2) Compare the arithmetic expressions EXPR-1 and EXPR-2, and expand to ‘-1’ if EXPR-1 is smaller, ‘0’ if they are equal, and ‘1’ if EXPR-1 is larger. 
-- Macro: m4_list_cmp (LIST-1, LIST-2) Compare the two M4 lists consisting of comma-separated arithmetic expressions, left to right. Expand to ‘-1’ for the first element pairing where the value from LIST-1 is smaller, ‘1’ where the value from LIST-2 is smaller, or ‘0’ if both lists have the same values. If one list is shorter than the other, the remaining elements of the longer list are compared against zero. m4_list_cmp([1, 0], [1]) ⇒0 m4_list_cmp([1, [1 * 0]], [1, 0]) ⇒0 m4_list_cmp([1, 2], [1, 0]) ⇒1 m4_list_cmp([1, [1+1], 3],[1, 2]) ⇒1 m4_list_cmp([1, 2, -3], [1, 2]) ⇒-1 m4_list_cmp([1, 0], [1, 2]) ⇒-1 m4_list_cmp([1], [1, 2]) ⇒-1 -- Macro: m4_max (ARG, ...) This macro was introduced in Autoconf 2.62. Expand to the decimal value of the maximum arithmetic expression among all the arguments. -- Macro: m4_min (ARG, ...) This macro was introduced in Autoconf 2.62. Expand to the decimal value of the minimum arithmetic expression among all the arguments. -- Macro: m4_sign (EXPR) Expand to ‘-1’ if the arithmetic expression EXPR is negative, ‘1’ if it is positive, and ‘0’ if it is zero. -- Macro: m4_version_compare (VERSION-1, VERSION-2) This macro was introduced in Autoconf 2.53, but had a number of usability limitations that were not lifted until Autoconf 2.62. Compare the version strings VERSION-1 and VERSION-2, and expand to ‘-1’ if VERSION-1 is smaller, ‘0’ if they are the same, or ‘1’ if VERSION-2 is smaller. Version strings must be a list of elements separated by ‘.’, ‘,’ or ‘-’, where each element is a number along with optional case-insensitive letters designating beta releases. The comparison stops at the leftmost element that contains a difference, although a 0 element compares equal to a missing element. It is permissible to include commit identifiers in VERSION, such as an abbreviated SHA1 of the commit, provided there is still a monotonically increasing prefix to allow for accurate version-based comparisons. 
For example, this paragraph was written when the development snapshot of autoconf claimed to be at version ‘2.61a-248-dc51’, or 248 commits after the 2.61a release, with an abbreviated commit identification of ‘dc51’. m4_version_compare([1.1], [2.0]) ⇒-1 m4_version_compare([2.0b], [2.0a]) ⇒1 m4_version_compare([1.1.1], [1.1.1a]) ⇒-1 m4_version_compare([1.2], [1.1.1a]) ⇒1 m4_version_compare([1.0], [1]) ⇒0 m4_version_compare([1.1pre], [1.1PRE]) ⇒0 m4_version_compare([1.1a], [1,10]) ⇒-1 m4_version_compare([2.61a], [2.61a-248-dc51]) ⇒-1 m4_version_compare([2.61b], [2.61a-248-dc51]) ⇒1 -- Macro: m4_version_prereq (VERSION, [IF-NEW-ENOUGH], [IF-OLD = ‘m4_fatal’]) Compares VERSION against the version of Autoconf currently running. If the running version is at VERSION or newer, expand IF-NEW-ENOUGH, but if VERSION is larger than the version currently executing, expand IF-OLD, which defaults to printing an error message and exiting m4sugar with status 63. When given only one argument, this behaves like ‘AC_PREREQ’ (*note Versioning::). Remember that the autoconf philosophy favors feature checks over version checks.  File: autoconf.info, Node: Set manipulation Macros, Next: Forbidden Patterns, Prev: Number processing Macros, Up: Programming in M4sugar 8.3.9 Set manipulation in M4 ---------------------------- Sometimes, it is necessary to track a set of data, where the order does not matter and where there are no duplicates in the set. The following macros facilitate set manipulations. Each set is an opaque object, which can only be accessed via these basic operations. The underlying implementation guarantees linear scaling for set creation, which is more efficient than using the quadratic ‘m4_append_uniq’. Both set names and values can be arbitrary strings, except for unbalanced quotes. 
This implementation ties up memory for removed elements until the next operation that must traverse all the elements of a set; and although that may slow down some operations until the memory for removed elements is pruned, it still guarantees linear performance. -- Macro: m4_set_add (SET, VALUE, [IF-UNIQ], [IF-DUP]) Adds the string VALUE as a member of set SET. Expand IF-UNIQ if the element was added, or IF-DUP if it was previously in the set. Operates in amortized constant time, so that set creation scales linearly. -- Macro: m4_set_add_all (SET, VALUE...) Adds each VALUE to the set SET. This is slightly more efficient than repeatedly invoking ‘m4_set_add’. -- Macro: m4_set_contains (SET, VALUE, [IF-PRESENT], [IF-ABSENT]) Expands IF-PRESENT if the string VALUE is a member of SET, otherwise IF-ABSENT. m4_set_contains([a], [1], [yes], [no]) ⇒no m4_set_add([a], [1], [added], [dup]) ⇒added m4_set_add([a], [1], [added], [dup]) ⇒dup m4_set_contains([a], [1], [yes], [no]) ⇒yes m4_set_remove([a], [1], [removed], [missing]) ⇒removed m4_set_contains([a], [1], [yes], [no]) ⇒no m4_set_remove([a], [1], [removed], [missing]) ⇒missing -- Macro: m4_set_contents (SET, [SEP]) -- Macro: m4_set_dump (SET, [SEP]) Expands to a single string consisting of all the members of the set SET, each separated by SEP, which is not expanded. ‘m4_set_contents’ leaves the elements in SET but reclaims any memory occupied by removed elements, while ‘m4_set_dump’ is a faster one-shot action that also deletes the set. No provision is made for disambiguating members that contain a non-empty SEP as a substring; use ‘m4_set_empty’ to distinguish between an empty set and the set containing only the empty string. The order of the output is unspecified; in the current implementation, part of the speed of ‘m4_set_dump’ results from using a different output order than ‘m4_set_contents’. 
These macros scale linearly in the size of the set before memory pruning, and ‘m4_set_contents([SET], [SEP])’ is faster than ‘m4_joinall([SEP]m4_set_listc([SET]))’. m4_set_add_all([a], [1], [2], [3]) ⇒ m4_set_contents([a], [-]) ⇒1-2-3 m4_joinall([-]m4_set_listc([a])) ⇒1-2-3 m4_set_dump([a], [-]) ⇒3-2-1 m4_set_contents([a]) ⇒ m4_set_add([a], []) ⇒ m4_set_contents([a], [-]) ⇒ -- Macro: m4_set_delete (SET) Delete all elements and memory associated with SET. This is linear in the set size, and faster than removing one element at a time. -- Macro: m4_set_difference (SETA, SETB) -- Macro: m4_set_intersection (SETA, SETB) -- Macro: m4_set_union (SETA, SETB) Compute the relation between SETA and SETB, and output the result as a list of quoted arguments without duplicates and with a leading comma. Set difference selects the elements in SETA but not SETB, intersection selects only elements in both sets, and union selects elements in either set. These actions are linear in the sum of the set sizes. The leading comma is necessary to distinguish between no elements and the empty string as the only element. m4_set_add_all([a], [1], [2], [3]) ⇒ m4_set_add_all([b], [3], [], [4]) ⇒ m4_set_difference([a], [b]) ⇒,1,2 m4_set_difference([b], [a]) ⇒,,4 m4_set_intersection([a], [b]) ⇒,3 m4_set_union([a], [b]) ⇒,1,2,3,,4 -- Macro: m4_set_empty (SET, [IF-EMPTY], [IF-ELEMENTS]) Expand IF-EMPTY if the set SET has no elements, otherwise expand IF-ELEMENTS. This macro operates in constant time. Using this macro can help disambiguate output from ‘m4_set_contents’ or ‘m4_set_list’. -- Macro: m4_set_foreach (SET, VARIABLE, ACTION) For each element in the set SET, expand ACTION with the macro VARIABLE defined as the set element. Behavior is unspecified if ACTION recursively lists the contents of SET (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element currently contained in VARIABLE. 
This macro is faster than the corresponding ‘m4_foreach([VARIABLE], m4_indir([m4_dquote]m4_set_listc([SET])), [ACTION])’, although ‘m4_set_map’ might be faster still. m4_set_add_all([a]m4_for([i], [1], [5], [], [,i])) ⇒ m4_set_contents([a]) ⇒12345 m4_set_foreach([a], [i], [m4_if(m4_eval(i&1), [0], [m4_set_remove([a], i, [i])])]) ⇒24 m4_set_contents([a]) ⇒135 -- Macro: m4_set_list (SET) -- Macro: m4_set_listc (SET) Produce a list of arguments, where each argument is a quoted element from the set SET. The variant ‘m4_set_listc’ is unambiguous, by adding a leading comma if there are any set elements, whereas the variant ‘m4_set_list’ cannot distinguish between an empty set and a set containing only the empty string. These can be directly used in macros that take multiple arguments, such as ‘m4_join’ or ‘m4_set_add_all’, or wrapped by ‘m4_dquote’ for macros that take a quoted list, such as ‘m4_map’ or ‘m4_foreach’. Any memory occupied by removed elements is reclaimed during these macros. m4_set_add_all([a], [1], [2], [3]) ⇒ m4_set_list([a]) ⇒1,2,3 m4_set_list([b]) ⇒ m4_set_listc([b]) ⇒ m4_count(m4_set_list([b])) ⇒1 m4_set_empty([b], [0], [m4_count(m4_set_list([b]))]) ⇒0 m4_set_add([b], []) ⇒ m4_set_list([b]) ⇒ m4_set_listc([b]) ⇒, m4_count(m4_set_list([b])) ⇒1 m4_set_empty([b], [0], [m4_count(m4_set_list([b]))]) ⇒1 -- Macro: m4_set_map (SET, ACTION) For each element in the set SET, expand ACTION with a single argument of the set element. Behavior is unspecified if ACTION recursively lists the contents of SET (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element passed as an argument. This macro is faster than either corresponding counterpart of ‘m4_map_args([ACTION]m4_set_listc([SET]))’ or ‘m4_set_foreach([SET], [var], [ACTION(m4_defn([var]))])’. It is possible to use ‘m4_curry’ if more than one argument is needed for ACTION, although it is more efficient to use ‘m4_set_map_sep’ in that case. 
-- Macro: m4_set_map_sep (SET, [PRE], [POST], [SEP]) For each element in the set SET, expand ‘PRE[element]POST’, additionally expanding SEP between elements. Behavior is unspecified if the expansion recursively lists the contents of SET (although listing other sets is acceptable), or if it modifies the set in any way other than removing the element visited by the expansion. This macro provides the most efficient means for non-destructively visiting the elements of a set; in particular, ‘m4_set_map([SET], [ACTION])’ is equivalent to ‘m4_set_map_sep([SET], [ACTION(], [)])’. -- Macro: m4_set_remove (SET, VALUE, [IF-PRESENT], [IF-ABSENT]) If VALUE is an element in the set SET, then remove it and expand IF-PRESENT. Otherwise expand IF-ABSENT. This macro operates in constant time so that multiple removals will scale linearly rather than quadratically; but when used outside of ‘m4_set_foreach’ or ‘m4_set_map’, it leaves memory occupied until the set is later compacted by ‘m4_set_contents’ or ‘m4_set_list’. Several other set operations are then less efficient between the time of element removal and subsequent memory compaction, but still maintain their guaranteed scaling performance. -- Macro: m4_set_size (SET) Expand to the size of the set SET. This implementation operates in constant time, and is thus more efficient than ‘m4_eval(m4_count(m4_set_listc([set])) - 1)’.  File: autoconf.info, Node: Forbidden Patterns, Prev: Set manipulation Macros, Up: Programming in M4sugar 8.3.10 Forbidden Patterns ------------------------- M4sugar provides a means to define suspicious patterns, patterns describing tokens which should not be found in the output. For instance, if an Autoconf ‘configure’ script includes tokens such as ‘AC_DEFINE’, or ‘dnl’, then most probably something went wrong (typically a macro was not evaluated because of overquotation). M4sugar forbids all the tokens matching ‘^_?m4_’ and ‘^dnl$’. 
Additional layers, such as M4sh and Autoconf, add additional forbidden patterns to the list. -- Macro: m4_pattern_forbid (PATTERN) Declare that no token matching PATTERN must be found in the output. The output file is (temporarily) split into one word per line as part of the ‘autom4te’ post-processing, with each line (and therefore word) then being checked against the Perl regular expression PATTERN. If the regular expression matches, and ‘m4_pattern_allow’ does not also match, then an error is raised. Comments are not checked; this can be a problem if, for instance, you have some macro left unexpanded after an ‘#include’. No consensus is currently found in the Autoconf community, as some people consider it should be valid to name macros in comments (which doesn’t make sense to the authors of this documentation: input, such as macros, should be documented by ‘dnl’ comments; reserving ‘#’-comments to document the output). As an example, if you define your own macros that begin with ‘M_’ and are composed from capital letters and underscores, the specification of ‘m4_pattern_forbid([^M_[A-Z_]+])’ will ensure all your macros are expanded when not used in comments. As an example of a common use of this macro, consider what happens in packages that want to use the ‘pkg-config’ script via the third-party ‘PKG_CHECK_MODULES’ macro. By default, if a developer checks out the development tree but has not yet installed the pkg-config macros locally, they can manage to successfully run ‘autoconf’ on the package, but the resulting ‘configure’ file will likely result in a confusing shell message about a syntax error on the line mentioning the unexpanded ‘PKG_CHECK_MODULES’ macro. On the other hand, if ‘configure.ac’ includes ‘m4_pattern_forbid([^PKG_])’, the missing pkg-config macros will be detected immediately without allowing ‘autoconf’ to succeed. Of course, you might encounter exceptions to these generic rules, for instance you might have to refer to ‘$m4_flags’. 
-- Macro: m4_pattern_allow (PATTERN) Any token matching PATTERN is allowed, including if it matches an ‘m4_pattern_forbid’ pattern. For example, Gnulib uses ‘m4_pattern_forbid([^gl_])’ to reserve the ‘gl_’ namespace for itself, but also uses ‘m4_pattern_allow([^gl_ES$])’ to avoid a false negative on the valid locale name.  File: autoconf.info, Node: Debugging via autom4te, Prev: Programming in M4sugar, Up: Programming in M4 8.4 Debugging via autom4te ========================== At times, it is desirable to see what was happening inside m4, to see why output was not matching expectations. However, post-processing done by ‘autom4te’ means that directly using the m4 builtin ‘m4_traceon’ is likely to interfere with operation. Also, frequent diversion changes and the concept of forbidden tokens make it difficult to use ‘m4_defn’ to generate inline comments in the final output. There are a couple of tools to help with this. One is the use of the ‘--trace’ option provided by ‘autom4te’ (as well as each of the programs that wrap ‘autom4te’, such as ‘autoconf’), in order to inspect when a macro is called and with which arguments. For example, when this paragraph was written, the autoconf version could be found by: $ autoconf --trace=AC_INIT configure.ac:23:AC_INIT:GNU Autoconf:2.63b.95-3963:bug-autoconf@gnu.org $ autoconf --trace='AC_INIT:version is $2' version is 2.63b.95-3963 Another trick is to print out the expansion of various m4 expressions to standard error or to an independent file, with no further m4 expansion, and without interfering with diversion changes or the post-processing done to standard output. ‘m4_errprintn’ shows a given expression on standard error. 
For example, if you want to see the expansion of an autoconf primitive or of one of your autoconf macros, you can do it like this: $ cat <<\EOF > configure.ac AC_INIT m4_errprintn([The definition of AC_DEFINE_UNQUOTED:]) m4_errprintn(m4_defn([AC_DEFINE_UNQUOTED])) AC_OUTPUT EOF $ autoconf error→The definition of AC_DEFINE_UNQUOTED: error→_AC_DEFINE_Q([], $@)  File: autoconf.info, Node: Programming in M4sh, Next: Writing Autoconf Macros, Prev: Programming in M4, Up: Top 9 Programming in M4sh ********************* M4sh, pronounced “mash”, is aiming at producing portable Bourne shell scripts. This name was coined by Lars J. Aas, who notes that, according to the Webster’s Revised Unabridged Dictionary (1913): Mash \Mash\, n. [Akin to G. meisch, maisch, meische, maische, mash, wash, and prob. to AS. miscian to mix. See “Mix”.] 1. A mass of mixed ingredients reduced to a soft pulpy state by beating or pressure... 2. A mixture of meal or bran and water fed to animals. 3. A mess; trouble. [Obs.] –Beau. & Fl. M4sh reserves the M4 macro namespace ‘^_AS_’ for internal use, and the namespace ‘^AS_’ for M4sh macros. It also reserves the shell and environment variable namespace ‘^as_’, and the here-document delimiter namespace ‘^_AS[A-Z]’ in the output file. You should not define your own macros or output shell code that conflicts with these namespaces. * Menu: * Common Shell Constructs:: Portability layer for common shell constructs * Polymorphic Variables:: Support for indirect variable names * Initialization Macros:: Macros to establish a sane shell environment * File Descriptor Macros:: File descriptor macros for input and output  File: autoconf.info, Node: Common Shell Constructs, Next: Polymorphic Variables, Up: Programming in M4sh 9.1 Common Shell Constructs =========================== M4sh provides portable alternatives for some common shell constructs that unfortunately are not portable in practice. 
-- Macro: AS_BOX (TEXT, [CHAR = ‘-’]) Expand into shell code that will output TEXT surrounded by a box with CHAR in the top and bottom border. TEXT should not contain a newline, but may contain shell expansions valid for unquoted here-documents. CHAR defaults to ‘-’, but can be any character except ‘/’, ‘'’, ‘"’, ‘\’, ‘&’, or ‘`’. This is useful for outputting a comment box into log files to separate distinct phases of script operation. -- Macro: AS_CASE (WORD, [PATTERN1], [IF-MATCHED1], ..., [DEFAULT]) Expand into a shell ‘case’ statement, where WORD is matched against one or more patterns. IF-MATCHED is run if the corresponding pattern matched WORD, else DEFAULT is run. *Note Prerequisite Macros:: for why this macro should be used instead of plain ‘case’ in code outside of an ‘AC_DEFUN’ macro, when the contents of the ‘case’ use ‘AC_REQUIRE’ directly or indirectly. *Note Limitations of Shell Builtins: case, for how this macro avoids some portability issues. *Note Balancing Parentheses:: for how this macro lets you write code with balanced parentheses even if your code must run on obsolescent shells. -- Macro: AS_DIRNAME (FILE-NAME) Output the directory portion of FILE-NAME. For example, if ‘$file’ is ‘/one/two/three’, the command ‘dir=`AS_DIRNAME(["$file"])`’ sets ‘dir’ to ‘/one/two’. This interface may be improved in the future to avoid forks and losing trailing newlines. -- Macro: AS_ECHO (WORD) Emits WORD to the standard output, followed by a newline. WORD must be a single shell word (typically a quoted string). The bytes of WORD are output as-is, even if it starts with "-" or contains "\". Redirections can be placed outside the macro invocation. This is much more portable than using ‘echo’ (*note Limitations of Shell Builtins: echo.). -- Macro: AS_ECHO_N (WORD) Emits WORD to the standard output, without a following newline. WORD must be a single shell word (typically a quoted string) and, for portability, should not include more than one newline. 
The bytes of WORD are output as-is, even if it starts with "-" or contains "\". Redirections can be placed outside the macro invocation. -- Macro: AS_ESCAPE (STRING, [CHARS = ‘`\"$’]) Expands to STRING, with any characters in CHARS escaped with a backslash (‘\’). CHARS should be at most four bytes long, and only contain characters from the set ‘`\"$’; however, characters may be safely listed more than once in CHARS for the sake of syntax highlighting editors. The current implementation expands STRING after adding escapes; if STRING contains macro calls that in turn expand to text needing shell quoting, you can use ‘AS_ESCAPE(m4_dquote(m4_expand([string])))’. The default for CHARS (‘\"$`’) is the set of characters needing escapes when STRING will be used literally within double quotes. One common variant is the set of characters to protect when STRING will be used literally within back-ticks or an unquoted here-document (‘\$`’). Another common variant is ‘""’, which can be used to form a double-quoted string containing the same expansions that would have occurred if STRING were expanded in an unquoted here-document; however, when using this variant, care must be taken that STRING does not use double quotes within complex variable expansions (such as ‘${foo-`echo "hi"`}’) that would be broken with improper escapes. This macro is often used with ‘AS_ECHO’. For an example, observe the output generated by the shell code generated from this snippet: foo=bar AS_ECHO(["AS_ESCAPE(["$foo" = ])AS_ESCAPE(["$foo"], [""])"]) ⇒"$foo" = "bar" m4_define([macro], [a, [\b]]) AS_ECHO(["AS_ESCAPE([[macro]])"]) ⇒macro AS_ECHO(["AS_ESCAPE([macro])"]) ⇒a, b AS_ECHO(["AS_ESCAPE(m4_dquote(m4_expand([macro])))"]) ⇒a, \b To escape a string that will be placed within single quotes, use: m4_bpatsubst([[STRING]], ['], ['\\'']) -- Macro: AS_EXECUTABLE_P (FILE) Emit code to probe whether FILE is a regular file with executable permissions (and not a directory with search permissions). 
The caller is responsible for quoting FILE. -- Macro: AS_EXIT ([STATUS = ‘$?’]) Emit code to exit the shell with STATUS, defaulting to ‘$?’. This macro works around shells that see the exit status of the command prior to ‘exit’ inside a ‘trap 0’ handler (*note Limitations of Shell Builtins: trap.). -- Macro: AS_IF (TEST1, [RUN-IF-TRUE1], ..., [RUN-IF-FALSE]) Run shell code TEST1. If TEST1 exits with a zero status then run shell code RUN-IF-TRUE1, else examine further tests. If no test exits with a zero status, run shell code RUN-IF-FALSE, with simplifications if either RUN-IF-TRUE1 or RUN-IF-FALSE is empty. For example, AS_IF([test "x$foo" = xyes], [HANDLE_FOO([yes])], [test "x$foo" != xno], [HANDLE_FOO([maybe])], [echo foo not specified]) ensures any required macros of ‘HANDLE_FOO’ are expanded before the first test. This macro should be used instead of plain ‘if’ in code outside of an ‘AC_DEFUN’ macro, when the contents of the ‘if’ use ‘AC_REQUIRE’ directly or indirectly (*note Prerequisite Macros::). -- Macro: AS_MKDIR_P (FILE-NAME) Make the directory FILE-NAME, including intervening directories as necessary. This is equivalent to ‘mkdir -p -- FILE-NAME’, except that it is portable to older versions of ‘mkdir’ that lack support for the ‘-p’ option or for the ‘--’ delimiter (*note Limitations of Usual Tools: mkdir.). Also, ‘AS_MKDIR_P’ succeeds if FILE-NAME is a symbolic link to an existing directory, even though Posix is unclear whether ‘mkdir -p’ should succeed in that case. If creation of FILE-NAME fails, exit the script. Also see the ‘AC_PROG_MKDIR_P’ macro (*note Particular Programs::). -- Macro: AS_SET_STATUS (STATUS) Emit shell code to set the value of ‘$?’ to STATUS, as efficiently as possible. However, this is not guaranteed to abort a shell running with ‘set -e’ (*note Limitations of Shell Builtins: set.). This should also be used at the end of a complex shell function instead of ‘return’ (*note Shell Functions::) to avoid a DJGPP shell bug. 
-- Macro: AS_TR_CPP (EXPRESSION) Transform EXPRESSION into a valid right-hand side for a C ‘#define’. For example: # This outputs "#define HAVE_CHAR_P 1". # Notice the m4 quoting around #, to prevent an m4 comment type="char *" echo "[#]define AS_TR_CPP([HAVE_$type]) 1" -- Macro: AS_TR_SH (EXPRESSION) Transform EXPRESSION into shell code that generates a valid shell variable name. The result is literal when possible at m4 time, but must be used with ‘eval’ if EXPRESSION causes shell indirections. For example: # This outputs "Have it!". header="sys/some file.h" eval AS_TR_SH([HAVE_$header])=yes if test "x$HAVE_sys_some_file_h" = xyes; then echo "Have it!"; fi -- Macro: AS_SET_CATFILE (VAR, DIR, FILE) Set the polymorphic shell variable VAR to DIR/FILE, but optimizing the common cases (DIR or FILE is ‘.’, FILE is absolute, etc.). -- Macro: AS_UNSET (VAR) Unsets the shell variable VAR, working around bugs in older shells (*note Limitations of Shell Builtins: unset.). VAR can be a literal or indirect variable name. -- Macro: AS_VERSION_COMPARE (VERSION-1, VERSION-2, [ACTION-IF-LESS], [ACTION-IF-EQUAL], [ACTION-IF-GREATER]) Compare two strings VERSION-1 and VERSION-2, possibly containing shell variables, as version strings, and expand ACTION-IF-LESS, ACTION-IF-EQUAL, or ACTION-IF-GREATER depending upon the result. The algorithm to compare is similar to the one used by strverscmp in glibc (*note String/Array Comparison: (libc)String/Array Comparison.).  File: autoconf.info, Node: Polymorphic Variables, Next: Initialization Macros, Prev: Common Shell Constructs, Up: Programming in M4sh 9.2 Support for indirect variable names ======================================= Often, it is convenient to write a macro that will emit shell code operating on a shell variable. The simplest case is when the variable name is known. 
But a more powerful idiom is writing shell code that can work through an indirection, where another variable or command substitution produces the name of the variable to actually manipulate. M4sh supports the notion of polymorphic shell variables, making it easy to write a macro that can deal with either literal or indirect variable names and output shell code appropriate for both use cases. Behavior is undefined if expansion of an indirect variable does not result in a literal variable name. -- Macro: AS_LITERAL_IF (EXPRESSION, [IF-LITERAL], [IF-NOT], [IF-SIMPLE-REF = IF-NOT]) -- Macro: AS_LITERAL_WORD_IF (EXPRESSION, [IF-LITERAL], [IF-NOT], [IF-SIMPLE-REF = IF-NOT]) If the expansion of EXPRESSION is definitely a shell literal, expand IF-LITERAL. If the expansion of EXPRESSION looks like it might contain shell indirections (such as ‘$var’ or ‘`expr`’), then IF-NOT is expanded. Sometimes, it is possible to output optimized code if EXPRESSION consists only of shell variable expansions (such as ‘${var}’), in which case IF-SIMPLE-REF can be provided; but defaulting to IF-NOT should always be safe. ‘AS_LITERAL_WORD_IF’ only expands IF-LITERAL if EXPRESSION looks like a single shell word, containing no whitespace; while ‘AS_LITERAL_IF’ allows whitespace in EXPRESSION. In order to reduce the time spent recognizing whether an EXPRESSION qualifies as a literal or a simple indirection, the implementation is somewhat conservative: EXPRESSION must be a single shell word (possibly after stripping whitespace), consisting only of bytes that would have the same meaning whether unquoted or enclosed in double quotes (for example, ‘a.b’ results in IF-LITERAL, even though it is not a valid shell variable name; while both ‘'a'’ and ‘[$]’ result in IF-NOT, because they behave differently than ‘"'a'"’ and ‘"[$]"’). 
This macro can be used in contexts for recognizing portable file names (such as in the implementation of ‘AC_LIBSOURCE’), or coupled with some transliterations for forming valid variable names (such as in the implementation of ‘AS_TR_SH’, which uses an additional ‘m4_translit’ to convert ‘.’ to ‘_’). This example shows how to read the contents of the shell variable ‘bar’, exercising all three arguments to ‘AS_LITERAL_IF’. It results in a script that will output the line ‘hello’ three times. AC_DEFUN([MY_ACTION], [AS_LITERAL_IF([$1], [echo "$$1"], [AS_VAR_COPY([var], [$1]) echo "$var"], [eval 'echo "$'"$1"\"])]) foo=bar bar=hello MY_ACTION([bar]) MY_ACTION([`echo bar`]) MY_ACTION([$foo]) -- Macro: AS_VAR_APPEND (VAR, TEXT) Emit shell code to append the shell expansion of TEXT to the end of the current contents of the polymorphic shell variable VAR, taking advantage of shells that provide the ‘+=’ extension for more efficient scaling. For situations where the final contents of VAR are relatively short (less than 256 bytes), it is more efficient to use the simpler code sequence of ‘VAR=${VAR}TEXT’ (or its polymorphic equivalent of ‘AS_VAR_COPY([t], [VAR])’ and ‘AS_VAR_SET([VAR], ["$t"TEXT])’). But in the case when the script will be repeatedly appending text into ‘var’, issues of scaling start to become apparent. A naive implementation requires execution time linear to the length of the current contents of VAR as well as the length of TEXT for a single append, for an overall quadratic scaling with multiple appends. This macro takes advantage of shells which provide the extension ‘VAR+=TEXT’, which can provide amortized constant time for a single append, for an overall linear scaling with multiple appends. Note that unlike ‘AS_VAR_SET’, this macro requires that TEXT be quoted properly to avoid field splitting and file name expansion. 
-- Macro: AS_VAR_ARITH (VAR, EXPRESSION) Emit shell code to compute the arithmetic expansion of EXPRESSION, assigning the result as the contents of the polymorphic shell variable VAR. The code takes advantage of shells that provide ‘$(())’ for fewer forks, but uses ‘expr’ as a fallback. Therefore, the syntax for a valid EXPRESSION is rather limited: all operators must occur as separate shell arguments and with proper quoting, there is no portable equality operator, all variables containing numeric values must be expanded prior to the computation, all numeric values must be provided in decimal without leading zeroes, and the first shell argument should not be a negative number. In the following example, this snippet will print ‘(2+3)*4 == 20’. bar=3 AS_VAR_ARITH([foo], [\( 2 + $bar \) \* 4]) echo "(2+$bar)*4 == $foo" -- Macro: AS_VAR_COPY (DEST, SOURCE) Emit shell code to assign the contents of the polymorphic shell variable SOURCE to the polymorphic shell variable DEST. For example, executing this M4sh snippet will output ‘bar hi’: foo=bar bar=hi AS_VAR_COPY([a], [foo]) AS_VAR_COPY([b], [$foo]) echo "$a $b" When it is necessary to access the contents of an indirect variable inside a shell double-quoted context, the recommended idiom is to first copy the contents into a temporary literal shell variable. for header in stdint_h inttypes_h ; do AS_VAR_COPY([var], [ac_cv_header_$header]) echo "$header detected: $var" done -- Macro: AS_VAR_IF (VAR, [WORD], [IF-EQUAL], [IF-NOT-EQUAL]) Output a shell conditional statement. If the contents of the polymorphic shell variable VAR match the string WORD, execute IF-EQUAL; otherwise execute IF-NOT-EQUAL. WORD must be a single shell word (typically a quoted string). Avoids shell bugs if an interrupt signal arrives while a command substitution in VAR is being expanded. 
-- Macro: AS_VAR_PUSHDEF (M4-NAME, VALUE) -- Macro: AS_VAR_POPDEF (M4-NAME) A common M4sh idiom involves composing shell variable names from an m4 argument (for example, writing a macro that uses a cache variable). VALUE can be an arbitrary string, which will be transliterated into a valid shell name by ‘AS_TR_SH’. In order to access the composed variable name based on VALUE, it is easier to declare a temporary m4 macro M4-NAME with ‘AS_VAR_PUSHDEF’, then use that macro as the argument to subsequent ‘AS_VAR’ macros as a polymorphic variable name, and finally free the temporary macro with ‘AS_VAR_POPDEF’. These macros are often followed with ‘dnl’, to avoid excess newlines in the output. Here is an involved example, that shows the power of writing macros that can handle composed shell variable names: m4_define([MY_CHECK_HEADER], [AS_VAR_PUSHDEF([my_Header], [ac_cv_header_$1])dnl AS_VAR_IF([my_Header], [yes], [echo "header $1 detected"])dnl AS_VAR_POPDEF([my_Header])dnl ]) MY_CHECK_HEADER([stdint.h]) for header in inttypes.h stdlib.h ; do MY_CHECK_HEADER([$header]) done In the above example, ‘MY_CHECK_HEADER’ can operate on polymorphic variable names. In the first invocation, the m4 argument is ‘stdint.h’, which transliterates into a literal ‘stdint_h’. As a result, the temporary macro ‘my_Header’ expands to the literal shell name ‘ac_cv_header_stdint_h’. In the second invocation, the m4 argument to ‘MY_CHECK_HEADER’ is ‘$header’, and the temporary macro ‘my_Header’ expands to the indirect shell name ‘$as_my_Header’. During the shell execution of the for loop, when ‘$header’ contains ‘inttypes.h’, then ‘$as_my_Header’ contains ‘ac_cv_header_inttypes_h’. 
If this script is then run on a platform where all three headers have been previously detected, the output of the script will include: header stdint.h detected header inttypes.h detected header stdlib.h detected -- Macro: AS_VAR_SET (VAR, [VALUE]) Emit shell code to assign the contents of the polymorphic shell variable VAR to the shell expansion of VALUE. VALUE is not subject to field splitting or file name expansion, so if command substitution is used, it may be done with ‘`""`’ rather than using an intermediate variable (*note Shell Substitutions::). However, VALUE does undergo rescanning for additional macro names; behavior is unspecified if late expansion results in any shell meta-characters. -- Macro: AS_VAR_SET_IF (VAR, [IF-SET], [IF-UNDEF]) Emit a shell conditional statement, which executes IF-SET if the polymorphic shell variable ‘var’ is set to any value, and IF-UNDEF otherwise. -- Macro: AS_VAR_TEST_SET (VAR) Emit a shell statement that results in a successful exit status only if the polymorphic shell variable ‘var’ is set.  File: autoconf.info, Node: Initialization Macros, Next: File Descriptor Macros, Prev: Polymorphic Variables, Up: Programming in M4sh 9.3 Initialization Macros ========================= -- Macro: AS_BOURNE_COMPATIBLE Set up the shell to be more compatible with the Bourne shell as standardized by Posix, if possible. This may involve setting environment variables, or setting options, or similar implementation-specific actions. This macro is deprecated, since ‘AS_INIT’ already invokes it. -- Macro: AS_INIT Initialize the M4sh environment. This macro calls ‘m4_init’, then outputs the ‘#! /bin/sh’ line, a notice about where the output was generated from, and code to sanitize the environment for the rest of the script. Among other initializations, this sets ‘SHELL’ to the shell chosen to run the script (*note CONFIG_SHELL::), and ‘LC_ALL’ to ensure the C locale. Finally, it changes the current diversion to ‘BODY’. 
‘AS_INIT’ is called automatically by ‘AC_INIT’ and ‘AT_INIT’, so shell code in ‘configure’, ‘config.status’, and ‘testsuite’ all benefit from a sanitized shell environment. -- Macro: AS_INIT_GENERATED (FILE, [COMMENT]) Emit shell code to start the creation of a subsidiary shell script in FILE, including changing FILE to be executable. This macro populates the child script with information learned from the parent (thus, the emitted code is equivalent in effect, but more efficient, than the code output by ‘AS_INIT’, ‘AS_BOURNE_COMPATIBLE’, and ‘AS_SHELL_SANITIZE’). If present, COMMENT is output near the beginning of the child, prior to the shell initialization code, and is subject to parameter expansion, command substitution, and backslash quote removal. The parent script should check the exit status after this macro, in case FILE could not be properly created (for example, if the disk was full). If successfully created, the parent script can then proceed to append additional M4sh constructs into the child script. Note that the child script starts life without a log file open, so if the parent script uses logging (*note AS_MESSAGE_LOG_FD::), you must temporarily disable any attempts to use the log file until after emitting code to open a log within the child. On the other hand, if the parent script has ‘AS_MESSAGE_FD’ redirected somewhere besides ‘1’, then the child script already has code that copies stdout to that descriptor. Currently, the suggested idiom for writing a M4sh shell script from within another script is: AS_INIT_GENERATED([FILE], [[# My child script. ]]) || { AS_ECHO(["Failed to create child script"]); AS_EXIT; } m4_pushdef([AS_MESSAGE_LOG_FD])dnl cat >> "FILE" <<\__EOF__ # Code to initialize AS_MESSAGE_LOG_FD m4_popdef([AS_MESSAGE_LOG_FD])dnl # Additional code __EOF__ This, however, may change in the future as the M4sh interface is stabilized further. 
Also, be aware that use of ‘LINENO’ within the child script may report line numbers relative to their location in the parent script, even when using ‘AS_LINENO_PREPARE’, if the parent script was unable to locate a shell with working ‘LINENO’ support. -- Macro: AS_LINENO_PREPARE Find a shell that supports the special variable ‘LINENO’, which contains the number of the currently executing line. This macro is automatically invoked by ‘AC_INIT’ in configure scripts. -- Macro: AS_ME_PREPARE Set up variable ‘as_me’ to be the basename of the currently executing script. This macro is automatically invoked by ‘AC_INIT’ in configure scripts. -- Macro: AS_TMPDIR (PREFIX, [DIR = ‘${TMPDIR:=/tmp}’]) Create, as safely as possible, a temporary sub-directory within DIR with a name starting with PREFIX. PREFIX should be 2–4 characters, to make it slightly easier to identify the owner of the directory. If DIR is omitted, then the value of ‘TMPDIR’ will be used (defaulting to ‘/tmp’). On success, the name of the newly created directory is stored in the shell variable ‘tmp’. On error, the script is aborted. Typically, this macro is coupled with some exit traps to delete the created directory and its contents on exit or interrupt. However, there is a slight window between when the directory is created and when the name is actually known to the shell, so an interrupt at the right moment might leave the temporary directory behind. Hence it is important to use a PREFIX that makes it easier to determine if a leftover temporary directory from an interrupted script is safe to delete. The use of the output variable ‘$tmp’ rather than something in the ‘as_’ namespace is historical; it has the unfortunate consequence that reusing this otherwise common name for any other purpose inside your script has the potential to break any cleanup traps designed to remove the temporary directory. -- Macro: AS_SHELL_SANITIZE Initialize the shell suitably for ‘configure’ scripts. 
This has the effect of ‘AS_BOURNE_COMPATIBLE’, and sets some other environment variables for predictable results from configuration tests. For example, it sets ‘LC_ALL’ to change to the default C locale. *Note Special Shell Variables::. This macro is deprecated, since ‘AS_INIT’ already invokes it.  File: autoconf.info, Node: File Descriptor Macros, Prev: Initialization Macros, Up: Programming in M4sh 9.4 File Descriptor Macros ========================== The following macros define file descriptors used to output messages (or input values) from ‘configure’ scripts. For example: echo "$wombats found" >&AS_MESSAGE_LOG_FD echo 'Enter desired kangaroo count:' >&AS_MESSAGE_FD read kangaroos <&AS_ORIGINAL_STDIN_FD However doing so is seldom needed, because Autoconf provides higher level macros as described below. -- Macro: AS_MESSAGE_FD The file descriptor for ‘checking for...’ messages and results. By default, ‘AS_INIT’ sets this to ‘1’ for standalone M4sh clients. However, ‘AC_INIT’ shuffles things around to another file descriptor, in order to allow the ‘-q’ option of ‘configure’ to choose whether messages should go to the script’s standard output or be discarded. If you want to display some messages, consider using one of the printing macros (*note Printing Messages::) instead. Copies of messages output via these macros are also recorded in ‘config.log’. -- Macro: AS_MESSAGE_LOG_FD This must either be empty, or expand to a file descriptor for log messages. By default, ‘AS_INIT’ sets this macro to the empty string for standalone M4sh clients, thus disabling logging. However, ‘AC_INIT’ shuffles things around so that both ‘configure’ and ‘config.status’ use ‘config.log’ for log messages. Macros that run tools, like ‘AC_COMPILE_IFELSE’ (*note Running the Compiler::), redirect all output to this descriptor. You may want to do so if you develop such a low-level macro. -- Macro: AS_ORIGINAL_STDIN_FD This must expand to a file descriptor for the original standard input. 
By default, ‘AS_INIT’ sets this macro to ‘0’ for standalone M4sh clients. However, ‘AC_INIT’ shuffles things around for safety. When ‘configure’ runs, it may accidentally execute an interactive command that has the same name as the non-interactive meant to be used or checked. If the standard input was the terminal, such interactive programs would cause ‘configure’ to stop, pending some user input. Therefore ‘configure’ redirects its standard input from ‘/dev/null’ during its initialization. This is not normally a problem, since ‘configure’ normally does not need user input. In the extreme case where your ‘configure’ script really needs to obtain some values from the original standard input, you can read them explicitly from ‘AS_ORIGINAL_STDIN_FD’.  File: autoconf.info, Node: Writing Autoconf Macros, Next: Portable Shell, Prev: Programming in M4sh, Up: Top 10 Writing Autoconf Macros ************************** When you write a feature test that could be applicable to more than one software package, the best thing to do is encapsulate it in a new macro. Here are some instructions and guidelines for writing Autoconf macros. You should also familiarize yourself with M4sugar (*note Programming in M4::) and M4sh (*note Programming in M4sh::). * Menu: * Macro Definitions:: Basic format of an Autoconf macro * Macro Names:: What to call your new macros * Dependencies Between Macros:: What to do when macros depend on other macros * Obsoleting Macros:: Warning about old ways of doing things * Coding Style:: Writing Autoconf macros à la Autoconf  File: autoconf.info, Node: Macro Definitions, Next: Macro Names, Up: Writing Autoconf Macros 10.1 Macro Definitions ====================== -- Macro: AC_DEFUN (NAME, [BODY]) Autoconf macros are defined using the ‘AC_DEFUN’ macro, which is similar to the M4 builtin ‘m4_define’ macro; this creates a macro named NAME and with BODY as its expansion. 
In addition to defining a macro, ‘AC_DEFUN’ adds to it some code that is used to constrain the order in which macros are called, while avoiding redundant output (*note Prerequisite Macros::). An Autoconf macro definition looks like this: AC_DEFUN(MACRO-NAME, MACRO-BODY) You can refer to any arguments passed to the macro as ‘$1’, ‘$2’, etc. *Note How to define new macros: (m4)Definitions, for more complete information on writing M4 macros. Most macros fall in one of two general categories. The first category includes macros which take arguments, in order to generate output parameterized by those arguments. Macros in this category are designed to be directly expanded, often multiple times, and should not be used as the argument to ‘AC_REQUIRE’. The other category includes macros which are shorthand for a fixed block of text, and therefore do not take arguments. For this category of macros, directly expanding the macro multiple times results in redundant output, so it is more common to use the macro as the argument to ‘AC_REQUIRE’, or to declare the macro with ‘AC_DEFUN_ONCE’ (*note One-Shot Macros::). Be sure to properly quote both the MACRO-BODY _and_ the MACRO-NAME to avoid any problems if the macro happens to have been previously defined. Each macro should have a header comment that gives its prototype, and a brief description. When arguments have default values, display them in the prototype. For example: # AC_MSG_ERROR(ERROR, [EXIT-STATUS = 1]) # -------------------------------------- m4_define([AC_MSG_ERROR], [{ AS_MESSAGE([error: $1], [2]) exit m4_default([$2], [1]); }]) Comments about the macro should be left in the header comment. Most other comments make their way into ‘configure’, so just keep using ‘#’ to introduce comments. If you have some special comments about pure M4 code, comments that make no sense in ‘configure’ and in the header comment, then use the builtin ‘dnl’: it causes M4 to discard the text through the next newline. 
Keep in mind that ‘dnl’ is rarely needed to introduce comments; ‘dnl’ is more useful to get rid of the newlines following macros that produce no output, such as ‘AC_REQUIRE’. Public third-party macros need to use ‘AC_DEFUN’, and not ‘m4_define’, in order to be found by ‘aclocal’ (*note (automake)Extending aclocal::). Additionally, if it is ever determined that a macro should be made obsolete, it is easy to convert from ‘AC_DEFUN’ to ‘AU_DEFUN’ in order to have ‘autoupdate’ assist the user in choosing a better alternative, but there is no corresponding way to make ‘m4_define’ issue an upgrade notice (*note AU_DEFUN::). There is another subtle, but important, difference between using ‘m4_define’ and ‘AC_DEFUN’: only the former is unaffected by ‘AC_REQUIRE’. When writing a file, it is always safe to replace a block of text with a ‘m4_define’ macro that will expand to the same text. But replacing a block of text with an ‘AC_DEFUN’ macro with the same content does not necessarily give the same results, because it changes the location where any embedded but unsatisfied ‘AC_REQUIRE’ invocations within the block will be expanded. For an example of this, see *note Expanded Before Required::.  File: autoconf.info, Node: Macro Names, Next: Dependencies Between Macros, Prev: Macro Definitions, Up: Writing Autoconf Macros 10.2 Macro Names ================ All of the public Autoconf macros have all-uppercase names in the namespace ‘^AC_’ to prevent them from accidentally conflicting with other text; Autoconf also reserves the namespace ‘^_AC_’ for internal macros. All shell variables that they use for internal purposes have mostly-lowercase names starting with ‘ac_’. Autoconf also uses here-document delimiters in the namespace ‘^_AC[A-Z]’. During ‘configure’, files produced by Autoconf make heavy use of the file system namespace ‘^conf’. 
Since Autoconf is built on top of M4sugar (*note Programming in M4sugar::) and M4sh (*note Programming in M4sh::), you must also be aware of those namespaces (‘^_?\(m4\|AS\)_’). And since ‘configure.ac’ is also designed to be scanned by Autoheader, Autoscan, Autoupdate, and Automake, you should be aware of the ‘^_?A[HNUM]_’ namespaces. In general, you _should not use_ the namespace of a package that does not own the macro or shell code you are writing. To ensure that your macros don’t conflict with present or future Autoconf macros, you should prefix your own macro names and any shell variables they use with some other sequence. Possibilities include your initials, or an abbreviation for the name of your organization or software package. Historically, people have not always followed the rule of using a namespace appropriate for their package, and this has made it difficult for determining the origin of a macro (and where to report bugs about that macro), as well as difficult for the true namespace owner to add new macros without interference from pre-existing uses of third-party macros. Perhaps the best example of this confusion is the ‘AM_GNU_GETTEXT’ macro, which belongs, not to Automake, but to Gettext. Most of the Autoconf macros’ names follow a structured naming convention that indicates the kind of feature check by the name. The macro names consist of several words, separated by underscores, going from most general to most specific. The names of their cache variables use the same convention (*note Cache Variable Names::, for more information on them). The first word of the name after the namespace initials (such as ‘AC_’) usually tells the category of the feature being tested. Here are the categories used in Autoconf for specific test macros, the kind of macro that you are more likely to write. They are also used for cache variables, in all-lowercase. Use them where applicable; where they’re not, invent your own categories. ‘C’ C language builtin features. 
‘DECL’ Declarations of C variables in header files. ‘FUNC’ Functions in libraries. ‘GROUP’ Posix group owners of files. ‘HEADER’ Header files. ‘LIB’ C libraries. ‘PROG’ The base names of programs. ‘MEMBER’ Members of aggregates. ‘SYS’ Operating system features. ‘TYPE’ C builtin or declared types. ‘VAR’ C variables in libraries. After the category comes the name of the particular feature being tested. Any further words in the macro name indicate particular aspects of the feature. For example, ‘AC_PROG_MAKE_SET’ checks whether ‘make’ sets a variable to its own name. An internal macro should have a name that starts with an underscore; Autoconf internals should therefore start with ‘_AC_’. Additionally, a macro that is an internal subroutine of another macro should have a name that starts with an underscore and the name of that other macro, followed by one or more words saying what the internal macro does. For example, ‘AC_PATH_X’ has internal macros ‘_AC_PATH_X_XMKMF’ and ‘_AC_PATH_X_DIRECT’.  File: autoconf.info, Node: Dependencies Between Macros, Next: Obsoleting Macros, Prev: Macro Names, Up: Writing Autoconf Macros 10.3 Dependencies Between Macros ================================ Some Autoconf macros depend on other macros having been called first in order to work correctly. Autoconf provides a way to ensure that certain macros are called if needed and a way to warn the user if macros are called in an order that might cause incorrect operation. * Menu: * Prerequisite Macros:: Ensuring required information * Suggested Ordering:: Warning about possible ordering problems * One-Shot Macros:: Ensuring a macro is called only once  File: autoconf.info, Node: Prerequisite Macros, Next: Suggested Ordering, Up: Dependencies Between Macros 10.3.1 Prerequisite Macros -------------------------- A macro that you write might need to use values that have previously been computed by other macros. 
For example, ‘AC_DECL_YYTEXT’ examines the output of ‘flex’ or ‘lex’, so it depends on ‘AC_PROG_LEX’ having been called first to set the shell variable ‘LEX’. Rather than forcing the user of the macros to keep track of the dependencies between them, you can use the ‘AC_REQUIRE’ macro to do it automatically. ‘AC_REQUIRE’ can ensure that a macro is only called if it is needed, and only called once. -- Macro: AC_REQUIRE (MACRO-NAME) If the M4 macro MACRO-NAME has not already been called, call it (without any arguments). Make sure to quote MACRO-NAME with square brackets. MACRO-NAME must have been defined using ‘AC_DEFUN’ or else contain a call to ‘AC_PROVIDE’ to indicate that it has been called. ‘AC_REQUIRE’ must be used inside a macro defined by ‘AC_DEFUN’; it must not be called from the top level. Also, it does not make sense to require a macro that takes parameters. ‘AC_REQUIRE’ is often misunderstood. It really implements dependencies between macros in the sense that if one macro depends upon another, the latter is expanded _before_ the body of the former. To be more precise, the required macro is expanded before the outermost defined macro in the current expansion stack. In particular, ‘AC_REQUIRE([FOO])’ is not replaced with the body of ‘FOO’. 
For instance, this definition of macros: AC_DEFUN([TRAVOLTA], [test "$body_temperature_in_celsius" -gt 38 && dance_floor=occupied]) AC_DEFUN([NEWTON_JOHN], [test "x$hair_style" = xcurly && dance_floor=occupied]) AC_DEFUN([RESERVE_DANCE_FLOOR], [if test "x`date +%A`" = xSaturday; then AC_REQUIRE([TRAVOLTA]) AC_REQUIRE([NEWTON_JOHN]) fi]) with this ‘configure.ac’ AC_INIT([Dance Manager], [1.0], [bug-dance@example.org]) RESERVE_DANCE_FLOOR if test "x$dance_floor" = xoccupied; then AC_MSG_ERROR([cannot pick up here, let's move]) fi does not leave you with a better chance to meet a kindred soul on days other than Saturday, since the call to ‘RESERVE_DANCE_FLOOR’ expands to: test "$body_temperature_in_celsius" -gt 38 && dance_floor=occupied test "x$hair_style" = xcurly && dance_floor=occupied fi if test "x`date +%A`" = xSaturday; then fi This behavior was chosen on purpose: (i) it prevents messages in required macros from interrupting the messages in the requiring macros; (ii) it avoids bad surprises when shell conditionals are used, as in: if ...; then AC_REQUIRE([SOME_CHECK]) fi ... SOME_CHECK However, this implementation can lead to another class of problems. Consider the case where an outer macro first expands, then indirectly requires, an inner macro: AC_DEFUN([TESTA], [[echo in A if test -n "$SEEN_A" ; then echo duplicate ; fi SEEN_A=:]]) AC_DEFUN([TESTB], [AC_REQUIRE([TESTA])[echo in B if test -z "$SEEN_A" ; then echo bug ; fi]]) AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) AC_DEFUN([OUTER], [[echo in OUTER] TESTA TESTC]) OUTER Prior to Autoconf 2.64, the implementation of ‘AC_REQUIRE’ recognized that ‘TESTB’ needed to be hoisted prior to the expansion of ‘OUTER’, but because ‘TESTA’ had already been directly expanded, it failed to hoist ‘TESTA’.
Therefore, the expansion of ‘TESTB’ occurs prior to its prerequisites, leading to the following output: in B bug in OUTER in A in C Newer Autoconf is smart enough to recognize this situation, and hoists ‘TESTA’ even though it has already been expanded, but issues a syntax warning in the process. This is because the hoisted expansion of ‘TESTA’ defeats the purpose of using ‘AC_REQUIRE’ to avoid redundant code, and causes its own set of problems if the hoisted macro is not idempotent: in A in B in OUTER in A duplicate in C The bug is not in Autoconf, but in the macro definitions. If you ever pass a particular macro name to ‘AC_REQUIRE’, then you are implying that the macro only needs to be expanded once. But to enforce this, either the macro must be declared with ‘AC_DEFUN_ONCE’ (although this only helps in Autoconf 2.64 or newer), or all uses of that macro should be through ‘AC_REQUIRE’; directly expanding the macro defeats the point of using ‘AC_REQUIRE’ to eliminate redundant expansion. In the example, this rule of thumb was violated because ‘TESTB’ requires ‘TESTA’ while ‘OUTER’ directly expands it. One way of fixing the bug is to factor ‘TESTA’ into two macros, the portion designed for direct and repeated use (here, named ‘TESTA’), and the portion designed for one-shot output and used only inside ‘AC_REQUIRE’ (here, named ‘TESTA_PREREQ’). 
Then, by fixing all clients to use the correct calling convention according to their needs: AC_DEFUN([TESTA], [AC_REQUIRE([TESTA_PREREQ])[echo in A]]) AC_DEFUN([TESTA_PREREQ], [[echo in A_PREREQ if test -n "$SEEN_A" ; then echo duplicate ; fi SEEN_A=:]]) AC_DEFUN([TESTB], [AC_REQUIRE([TESTA_PREREQ])[echo in B if test -z "$SEEN_A" ; then echo bug ; fi]]) AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) AC_DEFUN([OUTER], [[echo in OUTER] TESTA TESTC]) OUTER the resulting output will then obey all dependency rules and avoid any syntax warnings, whether the script is built with old or new Autoconf versions: in A_PREREQ in B in OUTER in A in C You can use the helper macros ‘AS_IF’ and ‘AS_CASE’ in top-level code to enforce expansion of required macros outside of shell conditional constructs; these helpers are not needed in the bodies of macros defined by ‘AC_DEFUN’. You are furthermore encouraged, although not required, to put all ‘AC_REQUIRE’ calls at the beginning of a macro. You can use ‘dnl’ to avoid the empty lines they leave. Autoconf will normally warn if an ‘AC_REQUIRE’ call refers to a macro that has not been defined. However, the ‘aclocal’ tool relies on parsing an incomplete set of input files to trace which macros have been required, in order to then pull in additional files that provide those macros; for this particular use case, pre-defining the macro ‘m4_require_silent_probe’ will avoid the warnings.  File: autoconf.info, Node: Suggested Ordering, Next: One-Shot Macros, Prev: Prerequisite Macros, Up: Dependencies Between Macros 10.3.2 Suggested Ordering ------------------------- Some macros should be run before another macro if both are called, but neither _requires_ that the other be called. For example, a macro that changes the behavior of the C compiler should be called before any macros that run the C compiler. Many of these dependencies are noted in the documentation. 
Autoconf provides the ‘AC_BEFORE’ macro to warn users when macros with this kind of dependency appear out of order in a ‘configure.ac’ file. The warning occurs when creating ‘configure’ from ‘configure.ac’, not when running ‘configure’. For example, ‘AC_PROG_CPP’ checks whether the C compiler can run the C preprocessor when given the ‘-E’ option. It should therefore be called after any macros that change which C compiler is being used, such as ‘AC_PROG_CC’. So ‘AC_PROG_CC’ contains: AC_BEFORE([$0], [AC_PROG_CPP])dnl This warns the user if a call to ‘AC_PROG_CPP’ has already occurred when ‘AC_PROG_CC’ is called. -- Macro: AC_BEFORE (THIS-MACRO-NAME, CALLED-MACRO-NAME) Make M4 print a warning message to the standard error output if CALLED-MACRO-NAME has already been called. THIS-MACRO-NAME should be the name of the macro that is calling ‘AC_BEFORE’. The macro CALLED-MACRO-NAME must have been defined using ‘AC_DEFUN’ or else contain a call to ‘AC_PROVIDE’ to indicate that it has been called.  File: autoconf.info, Node: One-Shot Macros, Prev: Suggested Ordering, Up: Dependencies Between Macros 10.3.3 One-Shot Macros ---------------------- Some macros should be called only once, either because calling them multiple time is unsafe, or because it is bad style. For instance Autoconf ensures that ‘AC_CANONICAL_BUILD’ and cousins (*note Canonicalizing::) are evaluated only once, because it makes no sense to run these expensive checks more than once. Such one-shot macros can be defined using ‘AC_DEFUN_ONCE’. -- Macro: AC_DEFUN_ONCE (MACRO-NAME, MACRO-BODY) Declare macro MACRO-NAME like ‘AC_DEFUN’ would (*note Macro Definitions::), but add additional logic that guarantees that only the first use of the macro (whether by direct expansion or ‘AC_REQUIRE’) causes an expansion of MACRO-BODY; the expansion will occur before the start of any enclosing macro defined by ‘AC_DEFUN’. Subsequent expansions are silently ignored. 
Generally, it does not make sense for MACRO-BODY to use parameters such as ‘$1’. Prior to Autoconf 2.64, a macro defined by ‘AC_DEFUN_ONCE’ would emit a warning if it was directly expanded a second time, so for portability, it is better to use ‘AC_REQUIRE’ than direct invocation of MACRO-NAME inside a macro defined by ‘AC_DEFUN’ (*note Prerequisite Macros::).  File: autoconf.info, Node: Obsoleting Macros, Next: Coding Style, Prev: Dependencies Between Macros, Up: Writing Autoconf Macros 10.4 Obsoleting Macros ====================== Configuration and portability technology has evolved over the years. Often better ways of solving a particular problem are developed, or ad-hoc approaches are systematized. This process has occurred in many parts of Autoconf. One result is that some of the macros are now considered “obsolete”; they still work, but are no longer considered the best thing to do, hence they should be replaced with more modern macros. Ideally, ‘autoupdate’ should replace the old macro calls with their modern implementation. Autoconf provides a simple means to obsolete a macro. -- Macro: AU_DEFUN (OLD-MACRO, IMPLEMENTATION, [MESSAGE], [SILENT]) Define OLD-MACRO as IMPLEMENTATION, just like ‘AC_DEFUN’, but also declare OLD-MACRO to be obsolete. When ‘autoupdate’ is run, occurrences of OLD-MACRO will be replaced by the text of IMPLEMENTATION in the updated ‘configure.ac’ file. If a simple textual replacement is not enough to finish the job of updating a ‘configure.ac’ to modern style, provide instructions for whatever additional manual work is required as MESSAGE. These instructions will be printed by ‘autoupdate’, and embedded in the updated ‘configure.ac’ file, next to the text of IMPLEMENTATION. Normally, ‘autoconf’ will also issue a warning (in the “obsolete” category) when it expands OLD-MACRO. This warning does not include MESSAGE; it only advises the maintainer to run ‘autoupdate’. 
If it is inappropriate to issue this warning, set the SILENT argument to the word ‘silent’. One might want to use a silent ‘AU_DEFUN’ when OLD-MACRO is used in a widely-distributed third-party macro. If that macro’s maintainers are aware of the need to update their code, it’s unnecessary to nag all of the transitive users of OLD-MACRO as well. This capability was added to ‘AU_DEFUN’ in Autoconf 2.70; older versions of autoconf will ignore the SILENT argument and issue the warning anyway. *Caution:* If IMPLEMENTATION contains M4 or M4sugar macros, they will be evaluated when ‘autoupdate’ is run, not emitted verbatim like the rest of IMPLEMENTATION. This cannot be avoided with extra quotation, because then OLD-MACRO will not work when it is called normally. See the definition of ‘AC_FOREACH’ in ‘general.m4’ for a workaround. -- Macro: AU_ALIAS (OLD-NAME, NEW-NAME, [SILENT]) A shorthand version of ‘AU_DEFUN’, to be used when a macro has simply been renamed. ‘autoupdate’ will replace calls to OLD-NAME with calls to NEW-NAME, keeping any arguments intact. No instructions for additional manual work will be printed. The SILENT argument works the same as the SILENT argument to ‘AU_DEFUN’. It was added to ‘AU_ALIAS’ in Autoconf 2.70. *Caution:* ‘AU_ALIAS’ cannot be used when NEW-NAME is an M4 or M4sugar macro. See above.  File: autoconf.info, Node: Coding Style, Prev: Obsoleting Macros, Up: Writing Autoconf Macros 10.5 Coding Style ================= The Autoconf macros follow a strict coding style. You are encouraged to follow this style, especially if you intend to distribute your macro, either by contributing it to Autoconf itself or the Autoconf Macro Archive (https://www.gnu.org/software/autoconf-archive/), or by other means. The first requirement is to pay great attention to the quotation. For more details, see *note Autoconf Language::, and *note M4 Quotation::. Do not try to invent new interfaces. 
It is likely that there is a macro in Autoconf that resembles the macro you are defining: try to stick to this existing interface (order of arguments, default values, etc.). We _are_ conscious that some of these interfaces are not perfect; nevertheless, when harmless, homogeneity should be preferred over creativity. Be careful about clashes both between M4 symbols and between shell variables. If you stick to the suggested M4 naming scheme (*note Macro Names::), you are unlikely to generate conflicts. Nevertheless, when you need to set a special value, _avoid using a regular macro name_; rather, use an “impossible” name. For instance, up to version 2.13, the macro ‘AC_SUBST’ used to remember what SYMBOL macros were already defined by setting ‘AC_SUBST_SYMBOL’, which is a regular macro name. But since there is a macro named ‘AC_SUBST_FILE’, it was just impossible to ‘AC_SUBST(FILE)’! In this case, ‘AC_SUBST(SYMBOL)’ or ‘_AC_SUBST(SYMBOL)’ should have been used (yes, with the parentheses). No Autoconf macro should ever enter the user-variable name space; i.e., except for the variables that are the actual result of running the macro, all shell variables should start with ‘ac_’. In addition, small macros or any macro that is likely to be embedded in other macros should be careful not to use obvious names. Do not use ‘dnl’ to introduce comments: most of the comments you are likely to write are either header comments which are not output anyway, or comments that should make their way into ‘configure’. There are exceptional cases where you do want to comment special M4 constructs, in which case ‘dnl’ is right, but keep in mind that it is unlikely. M4 ignores the leading blanks and newlines before each argument. Use this feature to indent in such a way that arguments are (more or less) aligned with the opening parenthesis of the macro being called. 
For instance, instead of AC_CACHE_CHECK(for EMX OS/2 environment, ac_cv_emxos2, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM(, [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) write AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) or even AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) When using ‘AC_RUN_IFELSE’ or any macro that cannot work when cross-compiling, provide a pessimistic value (typically ‘no’). Feel free to use various tricks to prevent auxiliary tools, such as syntax-highlighting editors, from behaving improperly. For instance, instead of: m4_bpatsubst([$1], [$"]) use m4_bpatsubst([$1], [$""]) so that Emacsen do not open an endless “string” at the first quote. For the same reasons, avoid: test $[#] != 0 and use: test $[@%:@] != 0 Otherwise, the closing bracket would be hidden inside a ‘#’-comment, breaking the bracket-matching highlighting from Emacsen. Note the preferred style to escape from M4: ‘$[1]’, ‘$[@]’, etc. Do not escape when it is unnecessary. Common examples of useless quotation are ‘[$]$1’ (write ‘$$1’), ‘[$]var’ (use ‘$var’), etc. If you add portability issues to the picture, you’ll prefer ‘${1+"$[@]"}’ to ‘"[$]@"’, and you’ll prefer do something better than hacking Autoconf ‘:-)’. When using ‘sed’, don’t use ‘-e’ except for indenting purposes. With the ‘s’ and ‘y’ commands, the preferred separator is ‘/’ unless ‘/’ itself might appear in the pattern or replacement, in which case you should use ‘|’, or optionally ‘,’ if you know the pattern and replacement cannot contain a file name. If none of these characters will do, choose a printable character that cannot appear in the pattern or replacement. 
Characters from the set ‘"#$&'()*;<=>?`|~’ are good choices if the pattern or replacement might contain a file name, since they have special meaning to the shell and are less likely to occur in file names. *Note Macro Definitions::, for details on how to define a macro. If a macro doesn’t use ‘AC_REQUIRE’, is expected to never be the object of an ‘AC_REQUIRE’ directive, and macros required by other macros inside arguments do not need to be expanded before this macro, then use ‘m4_define’. In case of doubt, use ‘AC_DEFUN’. Also take into account that public third-party macros need to use ‘AC_DEFUN’ in order to be found by ‘aclocal’ (*note (automake)Extending aclocal::). All the ‘AC_REQUIRE’ statements should be at the beginning of the macro, and each statement should be followed by ‘dnl’. You should not rely on the number of arguments: instead of checking whether an argument is missing, test that it is not empty. It provides both a simpler and a more predictable interface to the user, and saves room for further arguments. Unless the macro is short, try to leave the closing ‘])’ at the beginning of a line, followed by a comment that repeats the name of the macro being defined. This introduces an additional newline in ‘configure’; normally, that is not a problem, but if you want to remove it you can use ‘[]dnl’ on the last line. You can similarly use ‘[]dnl’ after a macro call to remove its newline. ‘[]dnl’ is recommended instead of ‘dnl’ to ensure that M4 does not interpret the ‘dnl’ as being attached to the preceding text or macro output. For example, instead of: AC_DEFUN([AC_PATH_X], [AC_MSG_CHECKING([for X]) AC_REQUIRE_CPP() # ...omitted... AC_MSG_RESULT([libraries $x_libraries, headers $x_includes]) fi]) you would write: AC_DEFUN([AC_PATH_X], [AC_REQUIRE_CPP()[]dnl AC_MSG_CHECKING([for X]) # ...omitted... AC_MSG_RESULT([libraries $x_libraries, headers $x_includes]) fi[]dnl ])# AC_PATH_X If the macro is long, try to split it into logical chunks. 
Typically, macros that check for a bug in a function and prepare its ‘AC_LIBOBJ’ replacement should have an auxiliary macro to perform this setup. Do not hesitate to introduce auxiliary macros to factor your code. In order to highlight the recommended coding style, here is a macro written the old way: dnl Check for EMX on OS/2. dnl _AC_EMXOS2 AC_DEFUN(_AC_EMXOS2, [AC_CACHE_CHECK(for EMX OS/2 environment, ac_cv_emxos2, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM(, return __EMX__;)], ac_cv_emxos2=yes, ac_cv_emxos2=no)]) test "x$ac_cv_emxos2" = xyes && EMXOS2=yes]) and the new way: # _AC_EMXOS2 # ---------- # Check for EMX on OS/2. m4_define([_AC_EMXOS2], [AC_CACHE_CHECK([for EMX OS/2 environment], [ac_cv_emxos2], [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [return __EMX__;])], [ac_cv_emxos2=yes], [ac_cv_emxos2=no])]) test "x$ac_cv_emxos2" = xyes && EMXOS2=yes[]dnl ])# _AC_EMXOS2  File: autoconf.info, Node: Portable Shell, Next: Portable Make, Prev: Writing Autoconf Macros, Up: Top 11 Portable Shell Programming ***************************** When writing your own checks, there are some shell-script programming techniques you should avoid in order to make your code portable. The Bourne shell and upward-compatible shells like the Korn shell and Bash have evolved over the years, and many features added to the original System7 shell are now supported on all interesting porting targets. However, the following discussion between Russ Allbery and Robert Lipe is worth reading: Russ Allbery: The GNU assumption that ‘/bin/sh’ is the one and only shell leads to a permanent deadlock. Vendors don’t want to break users’ existing shell scripts, and there are some corner cases in the Bourne shell that are not completely compatible with a Posix shell. Thus, vendors who have taken this route will _never_ (OK...“never say never”) replace the Bourne shell (as ‘/bin/sh’) with a Posix shell. Robert Lipe: This is exactly the problem. 
While most (at least most System V’s) do have a Bourne shell that accepts shell functions most vendor ‘/bin/sh’ programs are not the Posix shell. So while most modern systems do have a shell _somewhere_ that meets the Posix standard, the challenge is to find it. For this reason, part of the job of M4sh (*note Programming in M4sh::) is to find such a shell. But to prevent trouble, if you’re not using M4sh you should not take advantage of features that were added after Unix version 7, circa 1977 (*note Systemology::); you should not use aliases, negated character classes, or even ‘unset’. ‘#’ comments, while not in Unix version 7, were retrofitted in the original Bourne shell and can be assumed to be part of the least common denominator. On the other hand, if you’re using M4sh you can assume that the shell has the features that were added in SVR2 (circa 1984), including shell functions, ‘return’, ‘unset’, and I/O redirection for builtins. For more information, refer to <https://www.in-ulm.de/~mascheck/bourne/>. However, some pitfalls have to be avoided for portable use of these constructs; these will be documented in the rest of this chapter. See in particular *note Shell Functions:: and *note Limitations of Shell Builtins: Limitations of Builtins. Some ancient systems have quite small limits on the length of the ‘#!’ line; for instance, 32 bytes (not including the newline) on SunOS 4. However, these ancient systems are no longer of practical concern. The set of external programs you should run in a ‘configure’ script is fairly small. *Note Utilities in Makefiles: (standards)Utilities in Makefiles, for the list. This restriction allows users to start out with a fairly small set of programs and build the rest, avoiding too many interdependencies between packages. Some of these external utilities have a portable subset of features; see *note Limitations of Usual Tools::. There are other sources of documentation about shells.
The specification for the Posix Shell Command Language (https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html), though more generous than the restrictive shell subset described above, is fairly portable nowadays. Also please see the Shell FAQs (http://www.faqs.org/faqs/unix-faq/shell/). * Menu: * Shellology:: A zoology of shells * Invoking the Shell:: Invoking the shell as a command * Here-Documents:: Quirks and tricks * File Descriptors:: FDs and redirections * Signal Handling:: Shells, signals, and headaches * File System Conventions:: File names * Shell Pattern Matching:: Pattern matching * Shell Substitutions:: Variable and command expansions * Assignments:: Varying side effects of assignments * Parentheses:: Parentheses in shell scripts * Slashes:: Slashes in shell scripts * Special Shell Variables:: Variables you should not change * Shell Functions:: What to look out for if you use them * Limitations of Builtins:: Portable use of not so portable /bin/sh * Limitations of Usual Tools:: Portable use of portable tools  File: autoconf.info, Node: Shellology, Next: Invoking the Shell, Up: Portable Shell 11.1 Shellology =============== There are several families of shells, most prominently the Bourne family and the C shell family which are deeply incompatible. If you want to write portable shell scripts, avoid members of the C shell family. The Shell difference FAQ (http://www.faqs.org/faqs/unix-faq/shell/shell-differences/) includes a small history of Posix shells, and a comparison between several of them. Below we describe some of the members of the Bourne shell family. Ash Ash is often used on GNU/Linux and BSD systems as a light-weight Bourne-compatible shell. Ash 0.2 has some bugs that are fixed in the 0.3.x series, but portable shell scripts should work around them, since version 0.2 is still shipped with many GNU/Linux distributions.
To be compatible with Ash 0.2: − don’t use ‘$?’ after expanding empty or unset variables, or at the start of an ‘eval’: foo= false $foo echo "Do not use it: $?" false eval 'echo "Do not use it: $?"' − don’t use command substitution within variable expansion: cat ${FOO=`bar`} − beware that single builtin substitutions are not performed by a subshell, hence their effect applies to the current shell! *Note Shell Substitutions::, item “Command Substitution”. Bash To detect whether you are running Bash, test whether ‘BASH_VERSION’ is set. To require Posix compatibility, run ‘set -o posix’. *Note Bash Posix Mode: (bash)Bash POSIX Mode, for details. Bash 2.05 and later Versions 2.05 and later of Bash use a different format for the output of the ‘set’ builtin, designed to make evaluating its output easier. However, this output is not compatible with earlier versions of Bash (or with many other shells, probably). So if you use Bash 2.05 or higher to execute ‘configure’, you’ll need to use Bash 2.05 for all other build tasks as well. Ksh The Korn shell is compatible with the Bourne family and it mostly conforms to Posix. It has two major variants commonly called ‘ksh88’ and ‘ksh93’, named after the years of initial release. It is usually called ‘ksh’, but is called ‘sh’ on some hosts if you set your path appropriately. On Solaris 11, ‘/bin/sh’ and ‘/usr/bin/ksh’ are both ‘ksh93’. On Solaris 10 and earlier, ‘/bin/sh’ is a pre-Posix Bourne shell and the Korn shell is found elsewhere: ‘/usr/bin/ksh’ is ‘ksh88’ on Solaris 2.0 through 10, ‘/usr/xpg4/bin/sh’ is a Posix-compliant variant of ‘ksh88’ on Solaris 9 and later, and ‘/usr/dt/bin/dtksh’ is ‘ksh93’. Variants that are not standard may be parts of optional packages. There is no extra charge for these packages, but they are not part of a minimal OS install and therefore some installations may not have it. Starting with Tru64 Version 4.0, the Korn shell ‘/usr/bin/ksh’ is also available as ‘/usr/bin/posix/sh’. 
If the environment variable ‘BIN_SH’ is set to ‘xpg4’, subsidiary invocations of the standard shell conform to Posix. Pdksh A public-domain clone of the Korn shell called ‘pdksh’ is widely available: it has most of the ‘ksh88’ features along with a few of its own. It usually sets ‘KSH_VERSION’, except if invoked as ‘/bin/sh’ on OpenBSD, and similarly to Bash you can require Posix compatibility by running ‘set -o posix’. Unfortunately, with ‘pdksh’ 5.2.14 (the latest stable version as of January 2007) Posix mode is buggy and causes ‘pdksh’ to depart from Posix in at least one respect, see *note Shell Substitutions::. Zsh To detect whether you are running ‘zsh’, test whether ‘ZSH_VERSION’ is set. By default ‘zsh’ is _not_ compatible with the Bourne shell: you must execute ‘emulate sh’, and for ‘zsh’ versions before 3.1.6-dev-18 you must also set ‘NULLCMD’ to ‘:’. *Note Compatibility: (zsh)Compatibility, for details. The default Mac OS X ‘sh’ was originally Zsh; it was changed to Bash in Mac OS X 10.2.  File: autoconf.info, Node: Invoking the Shell, Next: Here-Documents, Prev: Shellology, Up: Portable Shell 11.2 Invoking the Shell ======================= The Korn shell (up to at least version M-12/28/93d) has a bug when invoked on a file whose name does not contain a slash. It first searches for the file’s name in ‘PATH’, and if found it executes that rather than the original file. For example, assuming there is a binary executable ‘/usr/bin/script’ in your ‘PATH’, the last command in the following example fails because the Korn shell finds ‘/usr/bin/script’ and refuses to execute it as a shell script: $ touch xxyzzyz script $ ksh xxyzzyz $ ksh ./script $ ksh script ksh: script: cannot execute Bash 2.03 has a bug when invoked with the ‘-c’ option: if the option-argument ends in backslash-newline, Bash incorrectly reports a syntax error. 
The problem does not occur if a character follows the backslash: $ bash -c 'echo foo \ > ' bash: -c: line 2: syntax error: unexpected end of file $ bash -c 'echo foo \ > ' foo *Note Backslash-Newline-Empty::, for how this can cause problems in makefiles.  File: autoconf.info, Node: Here-Documents, Next: File Descriptors, Prev: Invoking the Shell, Up: Portable Shell 11.3 Here-Documents =================== Don’t rely on ‘\’ being preserved just because it has no special meaning together with the next symbol. In the native ‘sh’ on OpenBSD 2.7 ‘\"’ expands to ‘"’ in here-documents with unquoted delimiter. As a general rule, if ‘\\’ expands to ‘\’ use ‘\\’ to get ‘\’. With OpenBSD 2.7’s ‘sh’ $ cat <<EOF > \" \\ > EOF " \ and with Bash: bash-2.04$ cat <<EOF > \" \\ > EOF \" \ Using command substitutions in a here-document that is fed to a shell function is not portable. For example, with Solaris 10 ‘/bin/sh’: $ kitty () { cat; } $ kitty <<EOF > `echo ok` > EOF /tmp/sh199886: cannot open $ echo $? 1 Some shells mishandle large here-documents: for example, Solaris 10 ‘dtksh’ and the UnixWare 7.1.1 Posix shell, which are derived from Korn shell version M-12/28/93d, mishandle braced variable expansion that crosses a 1024- or 4096-byte buffer boundary within a here-document. Only the part of the variable name after the boundary is used. For example, ‘${variable}’ could be replaced by the expansion of ‘${ble}’. If the end of the variable name is aligned with the block boundary, the shell reports an error, as if you used ‘${}’. Instead of ‘${variable-default}’, the shell may expand ‘${riable-default}’, or even ‘${fault}’. This bug can often be worked around by omitting the braces: ‘$variable’. The bug was fixed in ‘ksh93g’ (1998-04-30) but as of 2006 many operating systems were still shipping older versions with the bug.
Empty here-documents are not portable either; with the following code, ‘zsh’ up to at least version 4.3.10 creates a file with a single newline, whereas other shells create an empty file: cat >file <<EOF EOF Some shells can be extremely inefficient when there are a lot of here-documents inside a single statement. For instance if your ‘configure.ac’ includes something like: if <cross_compiling>; then assume this and that else check this check that check something else ... on and on forever ... fi A shell parses the whole ‘if’/‘fi’ construct, creating temporary files for each here-document in it. Some shells create links for such here-documents on every ‘fork’, so that the clean-up code they had installed correctly removes them. It is creating the links that can take the shell forever. Moving the tests out of the ‘if’/‘fi’, or creating multiple ‘if’/‘fi’ constructs, would improve the performance significantly. Anyway, this kind of construct is not exactly the typical use of Autoconf. In fact, it’s even not recommended, because M4 macros can’t look into shell conditionals, so we may fail to expand a macro when it was expanded before in a conditional path, and the condition turned out to be false at runtime, and we end up not executing the macro at all. Be careful with the use of ‘<<-’ to unindent here-documents. The behavior is only portable for stripping leading TABs, and things can silently break if an overzealous editor converts to using leading spaces (not all shells are nice enough to warn about unterminated here-documents). $ printf 'cat <<-x\n\t1\n\t 2\n\tx\n' | bash && echo done 1 2 done $ printf 'cat <<-x\n 1\n 2\n x\n' | bash-3.2 && echo done 1 2 x done  File: autoconf.info, Node: File Descriptors, Next: Signal Handling, Prev: Here-Documents, Up: Portable Shell 11.4 File Descriptors ===================== Most shells, if not all (including Bash, Zsh, Ash), output traces on stderr, even for subshells.
This might result in undesirable content if you meant to capture the standard-error output of the inner command: $ ash -x -c '(eval "echo foo >&2") 2>stderr' $ cat stderr + eval echo foo >&2 + echo foo foo $ bash -x -c '(eval "echo foo >&2") 2>stderr' $ cat stderr + eval 'echo foo >&2' ++ echo foo foo $ zsh -x -c '(eval "echo foo >&2") 2>stderr' # Traces on startup files deleted here. $ cat stderr +zsh:1> eval echo foo >&2 +zsh:1> echo foo foo One workaround is to grep out uninteresting lines, hoping not to remove good ones. If you intend to redirect both standard error and standard output, redirect standard output first. This works better with HP-UX, since its shell mishandles tracing if standard error is redirected first: $ sh -x -c ': 2>err >out' + : + 2> err $ cat err 1> out Don’t try to redirect the standard error of a command substitution. It must be done _inside_ the command substitution. When running ‘: `cd /zorglub` 2>/dev/null’ expect the error message to escape, while ‘: `cd /zorglub 2>/dev/null`’ works properly. On the other hand, some shells, such as Solaris or FreeBSD ‘/bin/sh’, warn about missing programs before performing redirections. Therefore, to silently check whether a program exists, it is necessary to perform redirections on a subshell or brace group: $ /bin/sh -c 'nosuch 2>/dev/null' nosuch: not found $ /bin/sh -c '(nosuch) 2>/dev/null' $ /bin/sh -c '{ nosuch; } 2>/dev/null' $ bash -c 'nosuch 2>/dev/null' FreeBSD 6.2 sh may mix the trace output lines from the statements in a shell pipeline. It is worth noting that Zsh (but not Ash nor Bash) makes it possible in assignments though: ‘foo=`cd /zorglub` 2>/dev/null’. Some shells, like ‘ash’, don’t recognize bi-directional redirection (‘<>’). 
And even on shells that recognize it, it is not portable to use on fifos: Posix does not require read-write support for named pipes, and Cygwin does not support it: $ mkfifo fifo $ exec 5<>fifo $ echo hi >&5 bash: echo: write error: Communication error on send Furthermore, versions of ‘dash’ before 0.5.6 mistakenly truncate regular files when using ‘<>’: $ echo a > file $ bash -c ': 1<>file'; cat file a $ dash -c ': 1<>file'; cat file $ rm a Solaris 10 ‘/bin/sh’ executes redirected compound commands in a subshell, while other shells don’t: $ /bin/sh -c 'foo=0; { foo=1; } 2>/dev/null; echo $foo' 0 $ ksh -c 'foo=0; { foo=1; } 2>/dev/null; echo $foo' 1 $ bash -c 'foo=0; { foo=1; } 2>/dev/null; echo $foo' 1 When catering to old systems, don’t redirect the same file descriptor several times, as you are doomed to failure under Ultrix. ULTRIX V4.4 (Rev. 69) System #31: Thu Aug 10 19:42:23 GMT 1995 UWS V4.4 (Rev. 11) $ eval 'echo matter >fullness' >void illegal io $ eval '(echo matter >fullness)' >void illegal io $ (eval '(echo matter >fullness)') >void Ambiguous output redirect. In each case the expected result is of course ‘fullness’ containing ‘matter’ and ‘void’ being empty. However, this bug is probably not of practical concern to modern platforms. Solaris 10 ‘sh’ will try to optimize away a ‘:’ command (even if it is redirected) in a loop after the first iteration, or in a shell function after the first call: $ for i in 1 2 3 ; do : >x$i; done $ ls x* x1 $ f () { : >$1; }; f y1; f y2; f y3; $ ls y* y1 As a workaround, ‘echo’ or ‘eval’ can be used. Don’t rely on file descriptors 0, 1, and 2 remaining closed in a subsidiary program. If any of these descriptors is closed, the operating system may open an unspecified file for the descriptor in the new process image. Posix 2008 says this may be done only if the subsidiary program is set-user-ID or set-group-ID, but HP-UX 11.23 does it even for ordinary programs, and the next version of Posix will allow HP-UX behavior. 
If you want a file descriptor above 2 to be inherited into a child process, then you must use redirections specific to that command or a containing subshell or command group, rather than relying on ‘exec’ in the shell. In ‘ksh’ as well as HP-UX ‘sh’, file descriptors above 2 which are opened using ‘exec N>file’ are closed by a subsequent ‘exec’ (such as that involved in the fork-and-exec which runs a program or script): $ echo 'echo hello >&5' >k $ /bin/sh -c 'exec 5>t; ksh ./k; exec 5>&-; cat t' hello $ bash -c 'exec 5>t; ksh ./k; exec 5>&-; cat t' hello $ ksh -c 'exec 5>t; ksh ./k; exec 5>&-; cat t' ./k[1]: 5: cannot open [Bad file number] $ ksh -c '(ksh ./k) 5>t; cat t' hello $ ksh -c '{ ksh ./k; } 5>t; cat t' hello $ ksh -c '5>t ksh ./k; cat t' hello Don’t rely on duplicating a closed file descriptor to cause an error. With Solaris 10 ‘/bin/sh’, failed duplication is silently ignored, which can cause unintended leaks to the original file descriptor. In this example, observe the leak to standard output: $ bash -c 'echo hi >&3' 3>&-; echo $? bash: 3: Bad file descriptor 1 $ /bin/sh -c 'echo hi >&3' 3>&-; echo $? hi 0 Fortunately, an attempt to close an already closed file descriptor will portably succeed. Likewise, it is safe to use either style of ‘N<&-’ or ‘N>&-’ for closing a file descriptor, even if it doesn’t match the read/write mode that the file descriptor was opened with. DOS variants cannot rename or remove open files, such as in ‘mv foo bar >foo’ or ‘rm foo >foo’, even though this is perfectly portable among Posix hosts. A few ancient systems reserved some file descriptors. By convention, file descriptor 3 was opened to ‘/dev/tty’ when you logged into Eighth Edition (1985) through Tenth Edition Unix (1989). File descriptor 4 had a special use on the Stardent/Kubota Titan (circa 1990), though we don’t now remember what it was. Both these systems are obsolete, so it’s now safe to treat file descriptors 3 and 4 like any other file descriptors.
On the other hand, you can’t portably use multi-digit file descriptors. ‘dash’ and Solaris ‘ksh’ don’t understand any file descriptor larger than ‘9’: $ bash -c 'exec 10>&-'; echo $? 0 $ ksh -c 'exec 9>&-'; echo $? 0 $ ksh -c 'exec 10>&-'; echo $? ksh[1]: exec: 10: not found 127 $ dash -c 'exec 9>&-'; echo $? 0 $ dash -c 'exec 10>&-'; echo $? exec: 1: 10: not found 2  File: autoconf.info, Node: Signal Handling, Next: File System Conventions, Prev: File Descriptors, Up: Portable Shell 11.5 Signal Handling ==================== Portable handling of signals within the shell is another major source of headaches. This is worsened by the fact that various different, mutually incompatible approaches are possible in this area, each with its distinctive merits and demerits. A detailed description of these possible approaches, as well as of their pros and cons, can be found in this article (https://www.cons.org/cracauer/sigint.html). Solaris 10 ‘/bin/sh’ automatically traps most signals by default; the shell still exits with error upon termination by one of those signals, but in such a case the exit status might be somewhat unexpected (even if allowed by POSIX, strictly speaking): $ bash -c 'kill -1 $$'; echo $? # Will exit 128 + (signal number). Hangup 129 $ /bin/ksh -c 'kill -15 $$'; echo $? # Likewise. Terminated 143 $ for sig in 1 2 3 15; do > echo "signal $sig:" > /bin/sh -c "kill -$sig \$\$"; echo $? > done signal 1: Hangup 129 signal 2: 208 signal 3: 208 signal 15: 208 This gets even worse if one is using the POSIX “wait” interface to get details about the shell process terminations: it will result in the shell having exited normally, rather than by receiving a signal. $ cat > foo.c <<'END' #include <stdio.h> /* for printf */ #include <stdlib.h> /* for system */ #include <sys/wait.h> /* for WIF* macros */ int main(void) { int status = system ("kill -15 $$"); printf ("Terminated by signal: %s\n", WIFSIGNALED (status) ? "yes" : "no"); printf ("Exited normally: %s\n", WIFEXITED (status) ?
"yes" : "no"); return 0; } END $ cc -o foo foo.c $ ./foo # On GNU/Linux Terminated by signal: no Exited normally: yes $ ./foo # On Solaris 10 Terminated by signal: yes Exited normally: no Various shells seem to handle ‘SIGQUIT’ specially: they ignore it even if it is not blocked, and even if the shell is not running interactively (in fact, even if the shell has no attached tty); among these shells are at least Bash (from version 2 onward), Zsh 4.3.12, Solaris 10 ‘/bin/ksh’ and ‘/usr/xpg4/bin/sh’, and AT&T ‘ksh93’ (2011). Still, ‘SIGQUIT’ seems to be trappable quite portably within all these shells. OTOH, some other shells don’t special-case the handling of ‘SIGQUIT’; among these shells are at least ‘pdksh’ 5.2.14, Solaris 10 and NetBSD 5.1 ‘/bin/sh’, and the Almquist Shell 0.5.5.1. Some shells (especially Korn shells and derivatives) might try to propagate to themselves a signal that has killed a child process; this is not a bug, but a conscious design choice (although its overall value might be debatable). The exact details of how this is attained vary from shell to shell. For example, upon running ‘perl -e 'kill 2, $$'’, after the perl process has been interrupted, AT&T ‘ksh93’ (2011) will proceed to send itself a ‘SIGINT’, while Solaris 10 ‘/bin/ksh’ and ‘/usr/xpg4/bin/sh’ will proceed to exit with status 130 (i.e., 128 + 2). In any case, if there is an active trap associated with ‘SIGINT’, those shells will correctly execute it. Some Korn shells, when a child process dies due to receiving a signal with signal number N, can leave in ‘$?’ an exit status of 256+N instead of the more common 128+N. Observe the difference between AT&T ‘ksh93’ (2011) and ‘bash’ 4.1.5 on Debian: $ /bin/ksh -c 'sh -c "kill -1 \$\$"; echo $?' /bin/ksh: line 1: 7837: Hangup 257 $ /bin/bash -c 'sh -c "kill -1 \$\$"; echo $?'
/bin/bash: line 1: 7861 Hangup (sh -c "kill -1 \$\$") 129 This ‘ksh’ behavior is allowed by POSIX, if implemented with due care; see this Austin Group discussion (https://www.austingroupbugs.net/view.php?id=51) for more background. However, if it is not implemented with proper care, such a behavior might cause problems in some corner cases. To see why, assume we have a “wrapper” script like this: #!/bin/sh # Ignore some signals in the shell only, not in its child processes. trap : 1 2 13 15 wrapped_command "$@" ret=$? other_command exit $ret If ‘wrapped_command’ is interrupted by a ‘SIGHUP’ (which has signal number 1), ‘ret’ will be set to 257. Unless the ‘exit’ shell builtin is smart enough to understand that such a value can only have originated from a signal, and adjust the final wait status of the shell appropriately, the value 257 will just get truncated to 1 by the closing ‘exit’ call, so that a caller of the script will have no way to determine that termination by a signal was involved. Observe the different behavior of AT&T ‘ksh93’ (2011) and ‘bash’ 4.1.5 on Debian: $ cat foo.sh #!/bin/sh sh -c 'kill -1 $$' ret=$? echo $ret exit $ret $ /bin/ksh foo.sh; echo $? foo.sh: line 2: 12479: Hangup 257 1 $ /bin/bash foo.sh; echo $? foo.sh: line 2: 12487 Hangup (sh -c 'kill -1 $$') 129 129  File: autoconf.info, Node: File System Conventions, Next: Shell Pattern Matching, Prev: Signal Handling, Up: Portable Shell 11.6 File System Conventions ============================ Autoconf uses shell-script processing extensively, so the file names that it processes should not contain characters that are special to the shell. Special characters include space, tab, newline, NUL, and the following: " # $ & ' ( ) * ; < = > ? [ \ ` | Also, file names should not begin with ‘~’ or ‘-’, and should contain neither ‘-’ immediately after ‘/’ nor ‘~’ immediately after ‘:’. On Posix-like platforms, directory names should not contain ‘:’, as this runs afoul of ‘:’ used as the path separator. 
These restrictions apply not only to the files that you distribute, but also to the absolute file names of your source, build, and destination directories. On some Posix-like platforms, ‘!’ and ‘^’ are special too, so they should be avoided. Posix lets implementations treat leading ‘//’ specially, but requires leading ‘///’ and beyond to be equivalent to ‘/’. Most Unix variants treat ‘//’ like ‘/’. However, some treat ‘//’ as a “super-root” that can provide access to files that are not otherwise reachable from ‘/’. The super-root tradition began with Apollo Domain/OS, which died out long ago, but unfortunately Cygwin has revived it. While ‘autoconf’ and friends are usually run on some Posix variety, they can be used on other systems, most notably DOS variants. This impacts several assumptions regarding file names. For example, the following code: case $foo_dir in /*) # Absolute ;; *) foo_dir=$dots$foo_dir ;; esac fails to properly detect absolute file names on those systems, because they can use a drivespec, and usually use a backslash as directory separator. If you want to be portable to DOS variants (at the price of rejecting valid but oddball Posix file names like ‘a:\b’), you can check for absolute file names like this: case $foo_dir in [\\/]* | ?:[\\/]* ) # Absolute ;; *) foo_dir=$dots$foo_dir ;; esac Make sure you quote the brackets if appropriate and keep the backslash as first character. *Note Limitations of Shell Builtins: case. Also, because the colon is used as part of a drivespec, these systems don’t use it as path separator. When creating or accessing paths, you can use the ‘PATH_SEPARATOR’ output variable instead. ‘configure’ sets this to the appropriate value for the build system (‘:’ or ‘;’) when it starts up. File names need extra care as well. While DOS variants that are Posixy enough to run ‘autoconf’ (such as DJGPP) are usually able to handle long file names properly, there are still limitations that can seriously break packages. 
Several of these issues can be easily detected by the doschk (https://ftp.gnu.org/gnu/non-gnu/doschk/doschk-1.1.tar.gz) package. A short overview follows; problems are marked with SFN/LFN to indicate where they apply: SFN means the issues are only relevant to plain DOS, not to DOS under Microsoft Windows variants, while LFN identifies problems that exist even under Microsoft Windows variants. No multiple dots (SFN) DOS cannot handle multiple dots in file names. This is an especially important thing to remember when building a portable configure script, as ‘autoconf’ uses a .in suffix for template files. This is perfectly OK on Posix variants: AC_CONFIG_HEADERS([config.h]) AC_CONFIG_FILES([source.c foo.bar]) AC_OUTPUT but it causes problems on DOS, as it requires ‘config.h.in’, ‘source.c.in’ and ‘foo.bar.in’. To make your package more portable to DOS-based environments, you should use this instead: AC_CONFIG_HEADERS([config.h:config.hin]) AC_CONFIG_FILES([source.c:source.cin foo.bar:foobar.in]) AC_OUTPUT No leading dot (SFN) DOS cannot handle file names that start with a dot. This is usually not important for ‘autoconf’. Case insensitivity (LFN) DOS is case insensitive, so you cannot, for example, have both a file called ‘INSTALL’ and a directory called ‘install’. This also affects ‘make’; if there’s a file called ‘INSTALL’ in the directory, ‘make install’ does nothing (unless the ‘install’ target is marked as PHONY). The 8+3 limit (SFN) Because the DOS file system only stores the first 8 characters of the file name and the first 3 of the extension, those must be unique. That means that ‘foobar-part1.c’, ‘foobar-part2.c’ and ‘foobar-prettybird.c’ all resolve to the same file name (‘FOOBAR-P.C’). The same goes for ‘foo.bar’ and ‘foo.bartender’. The 8+3 limit is not usually a problem under Microsoft Windows, as it uses numeric tails in the short version of file names to make them unique. However, a registry setting can turn this behavior off. 
While this makes it possible to share file trees containing long file names between SFN and LFN environments, it also means the above problem applies there as well. Invalid characters (LFN) Some characters are invalid in DOS file names, and should therefore be avoided. In a LFN environment, these are ‘/’, ‘\’, ‘?’, ‘*’, ‘:’, ‘<’, ‘>’, ‘|’ and ‘"’. In a SFN environment, other characters are also invalid. These include ‘+’, ‘,’, ‘[’ and ‘]’. Invalid names (LFN) Some DOS file names are reserved, and cause problems if you try to use files with those names. These names include ‘CON’, ‘AUX’, ‘COM1’, ‘COM2’, ‘COM3’, ‘COM4’, ‘LPT1’, ‘LPT2’, ‘LPT3’, ‘NUL’, and ‘PRN’. File names are case insensitive, so even names like ‘aux/config.guess’ are disallowed.  File: autoconf.info, Node: Shell Pattern Matching, Next: Shell Substitutions, Prev: File System Conventions, Up: Portable Shell 11.7 Shell Pattern Matching =========================== Nowadays portable patterns can use negated character classes like ‘[!-aeiou]’. The older syntax ‘[^-aeiou]’ is supported by some shells but not others; hence portable scripts should never use ‘^’ as the first character of a bracket pattern. Outside the C locale, patterns like ‘[a-z]’ are problematic since they may match characters that are not lower-case letters.  File: autoconf.info, Node: Shell Substitutions, Next: Assignments, Prev: Shell Pattern Matching, Up: Portable Shell 11.8 Shell Substitutions ======================== Contrary to a persistent urban legend, the Bourne shell does not systematically split variables and back-quoted expressions, in particular on the right-hand side of assignments and in the argument of ‘case’. For instance, the following code: case "$given_srcdir" in .) top_srcdir="`echo "$dots" | sed 's|/$||'`" ;; *) top_srcdir="$dots$given_srcdir" ;; esac is more readable when written as: case $given_srcdir in .) 
top_srcdir=`echo "$dots" | sed 's|/$||'` ;; *) top_srcdir=$dots$given_srcdir ;; esac and in fact it is even _more_ portable: in the first case of the first attempt, the computation of ‘top_srcdir’ is not portable, since not all shells properly understand ‘"`..."..."...`"’, for example Solaris 10 ‘ksh’: $ foo="`echo " bar" | sed 's, ,,'`" ksh: : cannot execute ksh: bar | sed 's, ,,': cannot execute Posix does not specify behavior for this sequence. On the other hand, behavior for ‘"`...\"...\"...`"’ is specified by Posix, but in practice, not all shells understand it the same way: pdksh 5.2.14 prints spurious quotes when in Posix mode: $ echo "`echo \"hello\"`" hello $ set -o posix $ echo "`echo \"hello\"`" "hello" There is just no portable way to use double-quoted strings inside double-quoted back-quoted expressions (pfew!). Bash 4.1 has a bug where quoted empty strings adjacent to unquoted parameter expansions are elided during word splitting. Meanwhile, zsh does not perform word splitting except when in Bourne compatibility mode. In the example below, the correct behavior is to have five arguments to the function, and exactly two spaces on either side of the middle ‘-’, since word splitting collapses multiple spaces in ‘$f’ but leaves empty arguments intact. $ bash -c 'n() { echo "$#$@"; }; f=" - "; n - ""$f"" -' 3- - - $ ksh -c 'n() { echo "$#$@"; }; f=" - "; n - ""$f"" -' 5- - - $ zsh -c 'n() { echo "$#$@"; }; f=" - "; n - ""$f"" -' 3- - - $ zsh -c 'emulate sh; > n() { echo "$#$@"; }; f=" - "; n - ""$f"" -' 5- - - You can work around this by doing manual word splitting, such as using ‘"$str" $list’ rather than ‘"$str"$list’. There are also portability pitfalls with particular expansions: ‘$@’ One of the most famous shell-portability issues is related to ‘"$@"’. 
When there are no positional arguments, Posix says that ‘"$@"’ is supposed to be equivalent to nothing, but the original Unix version 7 Bourne shell treated it as equivalent to ‘""’ instead, and this behavior survives in later implementations like Digital Unix 5.0. The traditional way to work around this portability problem is to use ‘${1+"$@"}’. Unfortunately this method does not work with Zsh (3.x and 4.x), which is used on Mac OS X. When emulating the Bourne shell, Zsh performs word splitting on ‘${1+"$@"}’: zsh $ emulate sh zsh $ for i in "$@"; do echo $i; done Hello World ! zsh $ for i in ${1+"$@"}; do echo $i; done Hello World ! Zsh handles plain ‘"$@"’ properly, but we can’t use plain ‘"$@"’ because of the portability problems mentioned above. One workaround relies on Zsh’s “global aliases” to convert ‘${1+"$@"}’ into ‘"$@"’ by itself: test ${ZSH_VERSION+y} && alias -g '${1+"$@"}'='"$@"' Zsh only recognizes this alias when a shell word matches it exactly; ‘"foo"${1+"$@"}’ remains subject to word splitting. Since this case always yields at least one shell word, use plain ‘"$@"’. A more conservative workaround is to avoid ‘"$@"’ if it is possible that there may be no positional arguments. For example, instead of: cat conftest.c "$@" you can use this instead: case $# in 0) cat conftest.c;; *) cat conftest.c "$@";; esac Autoconf macros often use the ‘set’ command to update ‘$@’, so if you are writing shell code intended for ‘configure’ you should not assume that the value of ‘$@’ persists for any length of time. ‘${10}’ The 10th, 11th, ... positional parameters can be accessed only after a ‘shift’. The 7th Edition shell reported an error if given ‘${10}’, and Solaris 10 ‘/bin/sh’ still acts that way: $ set 1 2 3 4 5 6 7 8 9 10 $ echo ${10} bad substitution Conversely, not all shells obey the Posix rule that when braces are omitted, multiple digits beyond a ‘$’ imply the single-digit positional parameter expansion concatenated with the remaining literal digits. 
To work around the issue, you must use braces. $ bash -c 'set a b c d e f g h i j; echo $10 ${1}0' a0 a0 $ dash -c 'set a b c d e f g h i j; echo $10 ${1}0' j a0 ‘${VAR:-VALUE}’ ‘${VAR:=VALUE}’ ‘${VAR:?VALUE}’ ‘${VAR:+VALUE}’ Old BSD shells, including the Ultrix ‘sh’, don’t accept the colon for any shell substitution, and complain and die. Similarly for ‘${VAR:=VALUE}’, ‘${VAR:?VALUE}’, etc. However, all shells that support functions allow the use of colon in shell substitution, and since m4sh requires functions, you can portably use null variable substitution patterns in configure scripts. ‘${VAR-VALUE}’ ‘${VAR:-VALUE}’ ‘${VAR=VALUE}’ ‘${VAR:=VALUE}’ ‘${VAR?VALUE}’ ‘${VAR:?VALUE}’ ‘${VAR+VALUE}’ ‘${VAR:+VALUE}’ When using ‘${VAR-VALUE}’ or similar notations that modify a parameter expansion, Posix requires that VALUE must be a single shell word, which can contain quoted strings but cannot contain unquoted spaces. If this requirement is not met Solaris 10 ‘/bin/sh’ sometimes complains, and anyway the behavior is not portable. $ /bin/sh -c 'echo ${a-b c}' /bin/sh: bad substitution $ /bin/sh -c 'echo ${a-'\''b c'\''}' b c $ /bin/sh -c 'echo "${a-b c}"' b c $ /bin/sh -c 'cat < broken $ echo "`printf 'foo\r\n'`"" bar" | cmp - broken - broken differ: char 4, line 1 Upon interrupt or SIGTERM, some shells may abort a command substitution, replace it with a null string, and wrongly evaluate the enclosing command before entering the trap or ending the script. This can lead to spurious errors: $ sh -c 'if test `sleep 5; echo hi` = hi; then echo yes; fi' $ ^C sh: test: hi: unexpected operator/operand You can avoid this by assigning the command substitution to a temporary variable: $ sh -c 'res=`sleep 5; echo hi` if test "x$res" = xhi; then echo yes; fi' $ ^C ‘$(COMMANDS)’ This construct is meant to replace ‘`COMMANDS`’, and it has most of the problems listed under ‘`COMMANDS`’. This construct can be nested while this is impossible to do portably with back quotes. 
Although it is almost universally supported, unfortunately Solaris 10 and earlier releases lack it: $ showrev -c /bin/sh | grep version Command version: SunOS 5.10 Generic 142251-02 Sep 2010 $ echo $(echo blah) syntax error: `(' unexpected nor does IRIX 6.5’s Bourne shell: $ uname -a IRIX firebird-image 6.5 07151432 IP22 $ echo $(echo blah) $(echo blah) If you do use ‘$(COMMANDS)’, make sure that the commands do not start with a parenthesis, as that would cause confusion with a different notation ‘$((EXPRESSION))’ that in modern shells is an arithmetic expression not a command. To avoid the confusion, insert a space between the two opening parentheses. Avoid COMMANDS that contain unbalanced parentheses in here-documents, comments, or case statement patterns, as many shells mishandle them. For example, Bash 3.1, ‘ksh88’, ‘pdksh’ 5.2.14, and Zsh 4.2.6 all mishandle the following valid command: echo $(case x in x) echo hello;; esac) ‘$((EXPRESSION))’ Arithmetic expansion is not portable as some shells (most notably Solaris 10 ‘/bin/sh’) don’t support it. Among shells that do support ‘$(( ))’, not all of them obey the Posix rule that octal and hexadecimal constants must be recognized: $ bash -c 'echo $(( 010 + 0x10 ))' 24 $ zsh -c 'echo $(( 010 + 0x10 ))' 26 $ zsh -c 'emulate sh; echo $(( 010 + 0x10 ))' 24 $ pdksh -c 'echo $(( 010 + 0x10 ))' pdksh: 010 + 0x10 : bad number `0x10' $ pdksh -c 'echo $(( 010 ))' 10 When it is available, using arithmetic expansion provides a noticeable speedup in script execution; but testing for support requires ‘eval’ to avoid syntax errors. 
The following construct is used by ‘AS_VAR_ARITH’ to provide arithmetic computation when all arguments are provided in decimal and without a leading zero, and all operators are properly quoted and appear as distinct arguments: if ( eval 'test $(( 1 + 1 )) = 2' ) 2>/dev/null; then eval 'func_arith () { func_arith_result=$(( $* )) }' else func_arith () { func_arith_result=`expr "$@"` } fi func_arith 1 + 1 foo=$func_arith_result ‘^’ Always quote ‘^’, otherwise traditional shells such as ‘/bin/sh’ on Solaris 10 treat this like ‘|’.  File: autoconf.info, Node: Assignments, Next: Parentheses, Prev: Shell Substitutions, Up: Portable Shell 11.9 Assignments ================ When setting several variables in a row, be aware that the order of the evaluation is undefined. For instance ‘foo=1 foo=2; echo $foo’ gives ‘1’ with Solaris 10 ‘/bin/sh’, but ‘2’ with Bash. You must use ‘;’ to enforce the order: ‘foo=1; foo=2; echo $foo’. Don’t rely on the following to find ‘subdir/program’: PATH=subdir$PATH_SEPARATOR$PATH program as this does not work with Zsh 3.0.6. Use something like this instead: (PATH=subdir$PATH_SEPARATOR$PATH; export PATH; exec program) Don’t rely on the exit status of an assignment: Ash 0.2 does not change the status and propagates that of the last statement: $ false || foo=bar; echo $? 1 $ false || foo=`:`; echo $? 0 and to make things even worse, QNX 4.25 just sets the exit status to 0 in any case: $ foo=`exit 1`; echo $? 0 To assign default values, follow this algorithm: 1. If the default value is a literal and does not contain any closing brace, use: : "${var='my literal'}" 2. If the default value contains no closing brace, has to be expanded, and the variable being initialized is not intended to be IFS-split (i.e., it’s not a list), then use: : ${var="$default"} 3. If the default value contains no closing brace, has to be expanded, and the variable being initialized is intended to be IFS-split (i.e., it’s a list), then use: var=${var="$default"} 4. 
If the default value contains a closing brace, then use: test ${var+y} || var="has a '}'" In most cases ‘var=${var="$default"}’ is fine, but in case of doubt, just use the last form. *Note Shell Substitutions::, items ‘${VAR:-VALUE}’ and ‘${VAR=VALUE}’ for the rationale.  File: autoconf.info, Node: Parentheses, Next: Slashes, Prev: Assignments, Up: Portable Shell 11.10 Parentheses in Shell Scripts ================================== Beware of two opening parentheses in a row, as many shell implementations treat them specially, and Posix says that a portable script cannot use ‘((’ outside the ‘$((’ form used for shell arithmetic. In traditional shells, ‘((cat))’ behaves like ‘(cat)’; but many shells, including Bash and the Korn shell, treat ‘((cat))’ as an arithmetic expression equivalent to ‘let "cat"’, and may or may not report an error when they detect that ‘cat’ is not a number. As another example, ‘pdksh’ 5.2.14 does not treat the following code as a traditional shell would: if ((true) || false); then echo ok fi To work around this problem, insert a space between the two opening parentheses. There is a similar problem and workaround with ‘$((’; see *note Shell Substitutions::.  File: autoconf.info, Node: Slashes, Next: Special Shell Variables, Prev: Parentheses, Up: Portable Shell 11.11 Slashes in Shell Scripts ============================== Unpatched Tru64 5.1 ‘sh’ omits the last slash of command-line arguments that contain two trailing slashes: $ echo / // /// //// .// //. / / // /// ./ //. $ x=// $ eval "echo \$x" / $ set -x $ echo abc | tr -t ab // + echo abc + tr -t ab / /bc Unpatched Tru64 4.0 ‘sh’ adds a slash after ‘"$var"’ if the variable is empty and the second double-quote is followed by a word that begins and ends with slash: $ sh -xc 'p=; echo "$p"/ouch/' p= + echo //ouch/ //ouch/ However, our understanding is that patches are available, so perhaps it’s not worth worrying about working around these horrendous bugs.  
File: autoconf.info, Node: Special Shell Variables, Next: Shell Functions, Prev: Slashes, Up: Portable Shell 11.12 Special Shell Variables ============================= Some shell variables should not be used, since they can have a deep influence on the behavior of the shell. In order to recover a sane behavior from the shell, some variables should be unset; M4sh takes care of this and provides fallback values, whenever needed, to cater for a very old ‘/bin/sh’ that does not support ‘unset’. (*note Portable Shell Programming: Portable Shell.). As a general rule, shell variable names containing a lower-case letter are safe; you can define and use these variables without worrying about their effect on the underlying system, and without worrying about whether the shell changes them unexpectedly. (The exception is the shell variable ‘status’, as described below.) Here is a list of names that are known to cause trouble. This list is not exhaustive, but you should be safe if you avoid the name ‘status’ and names containing only upper-case letters and underscores. ‘?’ Not all shells correctly reset ‘$?’ after conditionals (*note Limitations of Shell Builtins: if.). Not all shells manage ‘$?’ correctly in shell functions (*note Shell Functions::) or in traps (*note Limitations of Shell Builtins: trap.). Not all shells reset ‘$?’ to zero after an empty command. $ bash -c 'false; $empty; echo $?' 0 $ zsh -c 'false; $empty; echo $?' 1 ‘_’ Many shells reserve ‘$_’ for various purposes, e.g., the name of the last command executed. ‘BIN_SH’ In Tru64, if ‘BIN_SH’ is set to ‘xpg4’, subsidiary invocations of the standard shell conform to Posix. ‘CDPATH’ When this variable is set it specifies a list of directories to search when invoking ‘cd’ with a relative file name that did not start with ‘./’ or ‘../’. Posix 1003.1-2001 says that if a nonempty directory name from ‘CDPATH’ is used successfully, ‘cd’ prints the resulting absolute file name. 
Unfortunately this output can break idioms like ‘abs=`cd src && pwd`’ because ‘abs’ receives the name twice. Also, many shells do not conform to this part of Posix; for example, ‘zsh’ prints the result only if a directory name other than ‘.’ was chosen from ‘CDPATH’. In practice the shells that have this problem also support ‘unset’, so you can work around the problem as follows: (unset CDPATH) >/dev/null 2>&1 && unset CDPATH You can also avoid output by ensuring that your directory name is absolute or anchored at ‘./’, as in ‘abs=`cd ./src && pwd`’. Configure scripts use M4sh, which automatically unsets ‘CDPATH’ if possible, so you need not worry about this problem in those scripts. ‘CLICOLOR_FORCE’ When this variable is set, some implementations of tools like ‘ls’ attempt to add color to their output via terminal escape sequences, even when the output is not directed to a terminal, and can thus cause spurious failures in scripts. Configure scripts use M4sh, which automatically unsets this variable. ‘DUALCASE’ In the MKS shell, case statements and file name generation are case-insensitive unless ‘DUALCASE’ is nonzero. Autoconf-generated scripts export this variable when they start up. ‘ENV’ ‘MAIL’ ‘MAILPATH’ ‘PS1’ ‘PS2’ ‘PS4’ These variables should not matter for shell scripts, since they are supposed to affect only interactive shells. However, at least one shell (the pre-3.0 UWIN Korn shell) gets confused about whether it is interactive, which means that (for example) a ‘PS1’ with a side effect can unexpectedly modify ‘$?’. To work around this bug, M4sh scripts (including ‘configure’ scripts) do something like this: (unset ENV) >/dev/null 2>&1 && unset ENV MAIL MAILPATH PS1='$ ' PS2='> ' PS4='+ ' (actually, there is some complication due to bugs in ‘unset’; *note Limitations of Shell Builtins: unset.). ‘FPATH’ The Korn shell uses ‘FPATH’ to find shell functions, so avoid ‘FPATH’ in portable scripts. 
‘FPATH’ is consulted after ‘PATH’, but you still need to be wary of tests that use ‘PATH’ to find whether a command exists, since they might report the wrong result if ‘FPATH’ is also set. ‘GREP_OPTIONS’ When this variable is set, some implementations of ‘grep’ honor these options, even if the options include direction to enable colored output via terminal escape sequences, and the result can cause spurious failures when the output is not directed to a terminal. Configure scripts use M4sh, which automatically unsets this variable. ‘IFS’ Long ago, shell scripts inherited ‘IFS’ from the environment, but this caused many problems so modern shells ignore any environment settings for ‘IFS’. Don’t set the first character of ‘IFS’ to backslash. Indeed, Bourne shells use the first character (backslash) when joining the components in ‘"$@"’ and some shells then reinterpret (!) the backslash escapes, so you can end up with backspace and other strange characters. The proper value for ‘IFS’ (in regular code, not when performing splits) is the three-character sequence space, tab, newline, in that order. The first character is especially important, as it is used to join the arguments in ‘$*’; however, note that traditional shells, but also bash-2.04, fail to adhere to this and join with a space anyway. M4sh guarantees that ‘IFS’ will have the default value at the beginning of a script, and many macros within autoconf rely on this setting. It is okay to use blocks of shell code that temporarily change the value of ‘IFS’ in order to split on another character, but remember to restore it before expanding further macros. Unsetting ‘IFS’ instead of resetting it to the default sequence is not suggested, since code that tries to save and restore the variable’s value will incorrectly reset it to an empty value, thus disabling field splitting: unset IFS # default separators used for field splitting save_IFS=$IFS IFS=: # ...
IFS=$save_IFS # no field splitting performed ‘LANG’ ‘LC_ALL’ ‘LC_COLLATE’ ‘LC_CTYPE’ ‘LC_MESSAGES’ ‘LC_MONETARY’ ‘LC_NUMERIC’ ‘LC_TIME’ You should set all these variables to ‘C’ because so much configuration code assumes the C locale and Posix requires that locale environment variables be set to ‘C’ if the C locale is desired; ‘configure’ scripts and M4sh do that for you. Export these variables after setting them. ‘LANGUAGE’ ‘LANGUAGE’ is not specified by Posix, but it is a GNU extension that overrides ‘LC_ALL’ in some cases, so you (or M4sh) should set it too. ‘LC_ADDRESS’ ‘LC_IDENTIFICATION’ ‘LC_MEASUREMENT’ ‘LC_NAME’ ‘LC_PAPER’ ‘LC_TELEPHONE’ These locale environment variables are GNU extensions. They are treated like their Posix brethren (‘LC_COLLATE’, etc.) as described above. ‘LINENO’ Most modern shells provide the current line number in ‘LINENO’. Its value is the line number of the beginning of the current command. M4sh, and hence Autoconf, attempts to execute ‘configure’ with a shell that supports ‘LINENO’. If no such shell is available, it attempts to implement ‘LINENO’ with a Sed prepass that replaces each instance of the string ‘$LINENO’ (not followed by an alphanumeric character) with the line’s number. In M4sh scripts you should execute ‘AS_LINENO_PREPARE’ so that these workarounds are included in your script; configure scripts do this automatically in ‘AC_INIT’. You should not rely on ‘LINENO’ within ‘eval’ or shell functions, as the behavior differs in practice. The presence of a quoted newline within simple commands can alter which line number is used as the starting point for ‘$LINENO’ substitutions within that command. Also, the possibility of the Sed prepass means that you should not rely on ‘$LINENO’ when quoted, when in here-documents, or when line continuations are used. Subshells should be OK, though. 
In the following example, lines 1, 9, and 14 are portable, but the other instances of ‘$LINENO’ do not have deterministic values: $ cat lineno echo 1. $LINENO echo "2. $LINENO 3. $LINENO" cat <<EOF 5. $LINENO 6. $LINENO 7. \$LINENO EOF ( echo 9. $LINENO ) eval 'echo 10. $LINENO' eval 'echo 11. $LINENO 12. $LINENO' echo 13. '$LINENO' echo 14. $LINENO ' 15.' $LINENO f () { echo $1 $LINENO; echo $1 $LINENO } f 18. echo 19. $LINENO $ sed '=' <lineno | > sed ' > N > s,$,-, > t loop > :loop > s,^\([0-9]*\)\(.*\)[$]LINENO\([^a-zA-Z0-9_]\),\1\2\1\3, > t loop > s,-$,, > s,^[0-9]*\n,, > ' | > sh 1. 1 2. 2 3. 3 5. 5 6. 6 7. \7 9. 9 10. 10 11. 11 12. 12 13. 13 14. 14 15. 15 18. 16 18. 17 19. 20 In particular, note that ‘config.status’ (and any other subsidiary script created by ‘AS_INIT_GENERATED’) might report line numbers relative to the parent script as a result of the potential Sed pass. ‘NULLCMD’ When executing the command ‘>foo’, ‘zsh’ executes ‘$NULLCMD >foo’ unless it is operating in Bourne shell compatibility mode and the ‘zsh’ version is newer than 3.1.6-dev-18. If you are using an older ‘zsh’ and forget to set ‘NULLCMD’, your script might be suspended waiting for data on its standard input. ‘options’ For ‘zsh’ 4.3.10, ‘options’ is treated as an associative array even after ‘emulate sh’, so it should not be used. ‘PATH_SEPARATOR’ On DJGPP systems, the ‘PATH_SEPARATOR’ environment variable can be set to either ‘:’ or ‘;’ to control the path separator Bash uses to set up certain environment variables (such as ‘PATH’). You can set this variable to ‘;’ if you want ‘configure’ to use ‘;’ as a separator; this might be useful if you plan to use non-Posix shells to execute files. *Note File System Conventions::, for more information about ‘PATH_SEPARATOR’. ‘POSIXLY_CORRECT’ In the GNU environment, exporting ‘POSIXLY_CORRECT’ with any value (even empty) causes programs to try harder to conform to Posix. Autoconf does not directly manipulate this variable, but ‘bash’ ties the shell variable ‘POSIXLY_CORRECT’ to whether the script is running in Posix mode. Therefore, take care when exporting or unsetting this variable, so as not to change whether ‘bash’ is in Posix mode. 
$ bash --posix -c 'set -o | grep posix > unset POSIXLY_CORRECT > set -o | grep posix' posix on posix off ‘PWD’ Posix 1003.1-2001 requires that ‘cd’ and ‘pwd’ must update the ‘PWD’ environment variable to point to the logical name of the current directory, but traditional shells do not support this. This can cause confusion if one shell instance maintains ‘PWD’ but a subsidiary and different shell does not know about ‘PWD’ and executes ‘cd’; in this case ‘PWD’ points to the wrong directory. Use ‘`pwd`’ rather than ‘$PWD’. ‘RANDOM’ Many shells provide ‘RANDOM’, a variable that returns a different integer each time it is used. Most of the time, its value does not change when it is not used, but on IRIX 6.5 the value changes all the time. This can be observed by using ‘set’. It is common practice to use ‘$RANDOM’ as part of a file name, but code shouldn’t rely on ‘$RANDOM’ expanding to a nonempty string. ‘status’ This variable is an alias to ‘$?’ for ‘zsh’ (at least 3.1.6), hence read-only. Do not use it.  File: autoconf.info, Node: Shell Functions, Next: Limitations of Builtins, Prev: Special Shell Variables, Up: Portable Shell 11.13 Shell Functions ===================== Nowadays, it is difficult to find a shell that does not support shell functions at all. However, some differences should be expected. When declaring a shell function, you must include whitespace between the ‘)’ after the function name and the start of the compound expression, to avoid upsetting ‘ksh’. While it is possible to use any compound command, most scripts use ‘{...}’. 
$ /bin/sh -c 'a(){ echo hi;}; a' hi $ ksh -c 'a(){ echo hi;}; a' ksh: syntax error at line 1: `}' unexpected $ ksh -c 'a() { echo hi;}; a' hi Inside a shell function, you should not rely on the error status of a subshell if the last command of that subshell was ‘exit’ or ‘trap’, as this triggers bugs in zsh 4.x; while Autoconf tries to find a shell that does not exhibit the bug, zsh might be the only shell present on the user’s machine. Likewise, the state of ‘$?’ is not reliable when entering a shell function. This has the effect that using a function as the first command in a ‘trap’ handler can cause problems. $ bash -c 'foo() { echo $?; }; trap foo 0; (exit 2); exit 2'; echo $? 2 2 $ ash -c 'foo() { echo $?; }; trap foo 0; (exit 2); exit 2'; echo $? 0 2 DJGPP bash 2.04 has a bug in that ‘return’ from a shell function which also used a command substitution causes a segmentation fault. To work around the issue, you can use ‘return’ from a subshell, or ‘AS_SET_STATUS’ as last command in the execution flow of the function (*note Common Shell Constructs::). Not all shells treat shell functions as simple commands impacted by ‘set -e’, for example with Solaris 10 ‘/bin/sh’: $ bash -c 'f() { return 1; }; set -e; f; echo oops' $ /bin/sh -c 'f() { return 1; }; set -e; f; echo oops' oops Shell variables and functions may share the same namespace, for example with Solaris 10 ‘/bin/sh’: $ f () { :; }; f=; f f: not found For this reason, Autoconf (actually M4sh, *note Programming in M4sh::) uses the prefix ‘as_fn_’ for its functions. Handling of positional parameters and shell options varies among shells. For example, Korn shells reset and restore trace output (‘set -x’) and other options upon function entry and exit. Inside a function, IRIX sh sets ‘$0’ to the function name. It is not portable to pass temporary environment variables to shell functions. Solaris 10 ‘/bin/sh’ does not see the variable. 
Meanwhile, not all shells follow the Posix rule that the assignment must affect the current environment in the same manner as special built-ins. $ /bin/sh -c 'func() { echo $a;}; a=1 func; echo $a' ⇒ ⇒ $ ash -c 'func() { echo $a;}; a=1 func; echo $a' ⇒1 ⇒ $ bash -c 'set -o posix; func() { echo $a;}; a=1 func; echo $a' ⇒1 ⇒1 Some ancient Bourne shell variants with function support did not reset ‘$I, I >= 0’, upon function exit, so effectively the arguments of the script were lost after the first function invocation. It is probably not worth worrying about these shells any more. With AIX sh, a ‘trap’ on 0 installed in a shell function triggers at function exit rather than at script exit. *Note Limitations of Shell Builtins: trap.  File: autoconf.info, Node: Limitations of Builtins, Next: Limitations of Usual Tools, Prev: Shell Functions, Up: Portable Shell 11.14 Limitations of Shell Builtins =================================== No, no, we are serious: some shells do have limitations! :) You should always keep in mind that any builtin or command may support options, and therefore differ in behavior with arguments starting with a dash. For instance, even the innocent ‘echo "$word"’ can give unexpected results when ‘word’ starts with a dash. It is often possible to avoid this problem using ‘echo "x$word"’, taking the ‘x’ into account later in the pipe. Many of these limitations can be worked around using M4sh (*note Programming in M4sh::). ‘.’ Use ‘.’ only with regular files (use ‘test -f’). Bash 2.03, for instance, chokes on ‘. /dev/null’. Remember that ‘.’ uses ‘PATH’ if its argument contains no slashes. Also, some shells, including bash 3.2, implicitly append the current directory to this ‘PATH’ search, even though Posix forbids it. So if you want to use ‘.’ on a file ‘foo’ in the current directory, you must use ‘. ./foo’. Not all shells gracefully handle syntax errors within a sourced file. On one extreme, some non-interactive shells abort the entire script. 
On the other, ‘zsh’ 4.3.10 has a bug where it fails to react to the syntax error. $ echo 'fi' > syntax $ bash -c '. ./syntax; echo $?' ./syntax: line 1: syntax error near unexpected token `fi' ./syntax: line 1: `fi' 1 $ ash -c '. ./syntax; echo $?' ./syntax: 1: Syntax error: "fi" unexpected $ zsh -c '. ./syntax; echo $?' ./syntax:1: parse error near `fi' 0 ‘!’ The Unix version 7 shell did not support negating the exit status of commands with ‘!’, and this feature is still absent from some shells (e.g., Solaris 10 ‘/bin/sh’). Other shells, such as FreeBSD ‘/bin/sh’ or ‘ash’, have bugs when using ‘!’: $ sh -c '! : | :'; echo $? 1 $ ash -c '! : | :'; echo $? 0 $ sh -c '! { :; }'; echo $? 1 $ ash -c '! { :; }'; echo $? {: not found Syntax error: "}" unexpected 2 Shell code like this: if ! cmp file1 file2 >/dev/null 2>&1; then echo files differ or trouble fi is therefore not portable in practice. Typically it is easy to rewrite such code, e.g.: cmp file1 file2 >/dev/null 2>&1 || echo files differ or trouble More generally, one can always rewrite ‘! COMMAND’ as: if COMMAND; then (exit 1); else :; fi ‘{...}’ Bash 3.2 (and earlier versions) sometimes does not properly set ‘$?’ when failing to write redirected output of a compound command. This problem is most commonly observed with ‘{...}’; it does not occur with ‘(...)’. For example: $ bash -c '{ echo foo; } >/bad; echo $?' bash: line 1: /bad: Permission denied 0 $ bash -c 'while :; do echo; done >/bad; echo $?' bash: line 1: /bad: Permission denied 0 To work around the bug, prepend ‘:;’: $ bash -c ':;{ echo foo; } >/bad; echo $?' bash: line 1: /bad: Permission denied 1 Posix requires a syntax error if a brace list has no contents. However, not all shells obey this rule; and on shells where empty lists are permitted, the effect on ‘$?’ is inconsistent. To avoid problems, ensure that a brace list is never empty. $ bash -c 'false; { }; echo $?' || echo $? 
bash: line 1: syntax error near unexpected token `}' bash: line 1: `false; { }; echo $?' 2 $ zsh -c 'false; { }; echo $?' || echo $? 1 $ pdksh -c 'false; { }; echo $?' || echo $? 0 ‘break’ The use of ‘break 2’ etc. is safe. ‘case’ You don’t need to quote the argument; no splitting is performed. You don’t need the final ‘;;’, but you should use it. Posix requires support for ‘case’ patterns with opening parentheses like this: case $file_name in (*.c) echo "C source code";; esac but the ‘(’ in this example is not portable to a few obsolescent Bourne shell implementations, which is a pity for those of us using tools that rely on balanced parentheses. For instance, with Solaris 10 ‘/bin/sh’: $ case foo in (foo) echo foo;; esac error→syntax error: `(' unexpected The leading ‘(’ can be omitted safely. Unfortunately, there are contexts where unbalanced parentheses cause other problems, such as when using a syntax-highlighting editor that searches for the balancing counterpart, or more importantly, when using a case statement as an underquoted argument to an Autoconf macro. *Note Balancing Parentheses::, for trade-offs involved in various styles of dealing with unbalanced ‘)’. Zsh handles pattern fragments derived from parameter expansions or command substitutions as though quoted: $ pat=\?; case aa in ?$pat) echo match;; esac $ pat=\?; case a? in ?$pat) echo match;; esac match Because of a bug in its ‘fnmatch’, Bash fails to properly handle backslashes in character classes: bash-2.02$ case /tmp in [/\\]*) echo OK;; esac bash-2.02$ This is extremely unfortunate, since you are likely to use this code to handle Posix or MS-DOS absolute file names. To work around this bug, always put the backslash first: bash-2.02$ case '\TMP' in [\\/]*) echo OK;; esac OK bash-2.02$ case /tmp in [\\/]*) echo OK;; esac OK Many Bourne shells cannot handle closing brackets in character classes correctly. 
Some shells also have problems with backslash escaping in case you do not want to match the backslash: both a backslash and the escaped character match this pattern. To work around this, specify the character class in a variable, so that quote removal does not apply afterwards, and the special characters don’t have to be backslash-escaped: $ case '\' in [\<]) echo OK;; esac OK $ scanset='[<]'; case '\' in $scanset) echo OK;; esac $ Even with this, Solaris ‘ksh’ matches a backslash if the set contains any of the characters ‘|’, ‘&’, ‘(’, or ‘)’. Conversely, Tru64 ‘ksh’ (circa 2003) erroneously always matches a closing parenthesis if not specified in a character class: $ case foo in *\)*) echo fail ;; esac fail $ case foo in *')'*) echo fail ;; esac fail Some shells, such as Ash 0.3.8, are confused by an empty ‘case’/‘esac’: ash-0.3.8 $ case foo in esac; error→Syntax error: ";" unexpected (expecting ")") Posix requires ‘case’ to give an exit status of 0 if no cases match. However, ‘/bin/sh’ in Solaris 10 does not obey this rule. Meanwhile, it is unclear whether a case that matches, but contains no statements, must also change the exit status to 0. The M4sh macro ‘AS_CASE’ works around these inconsistencies. $ bash -c 'case `false` in ?) ;; esac; echo $?' 0 $ /bin/sh -c 'case `false` in ?) ;; esac; echo $?' 255 ‘cd’ Posix 1003.1-2001 requires that ‘cd’ must support the ‘-L’ (“logical”) and ‘-P’ (“physical”) options, with ‘-L’ being the default. However, traditional shells do not support these options, and their ‘cd’ command has the ‘-P’ behavior. Portable scripts should assume neither option is supported, and should assume neither behavior is the default. This can be a bit tricky, since the Posix default behavior means that, for example, ‘ls ..’ and ‘cd ..’ may refer to different directories if the current logical directory is a symbolic link. It is safe to use ‘cd DIR’ if DIR contains no ‘..’ components. 
Also, Autoconf-generated scripts check for this problem when computing variables like ‘ac_top_srcdir’ (*note Configuration Actions::), so it is safe to ‘cd’ to these variables. Posix states that behavior is undefined if ‘cd’ is given an explicit empty argument. Some shells do nothing, some change to the first entry in ‘CDPATH’, some change to ‘HOME’, and some exit the shell rather than returning an error. Unfortunately, this means that if ‘$var’ is empty, then ‘cd "$var"’ is less predictable than ‘cd $var’ (at least the latter is well-behaved in all shells at changing to ‘HOME’, although this is probably not what you wanted in a script). You should check that a directory name was supplied before trying to change locations. *Note Special Shell Variables::, for portability problems involving ‘cd’ and the ‘CDPATH’ environment variable. Also please see the discussion of the ‘pwd’ command. ‘echo’ The simple ‘echo’ is probably the most surprising source of portability troubles. It is not possible to use ‘echo’ portably unless both options and escape sequences are omitted. Don’t expect any option. Do not use backslashes in the arguments, as there is no consensus on their handling. For ‘echo '\n' | wc -l’, the ‘sh’ of Solaris 10 outputs 2, but Bash and Zsh (in ‘sh’ emulation mode) output 1. The problem is truly ‘echo’: all the shells understand ‘'\n'’ as the string composed of a backslash and an ‘n’. Within a command substitution, ‘echo 'string\c'’ will mess up the internal state of ksh88 on AIX 6.1 so that it will print the first character ‘s’ only, followed by a newline, and then entirely drop the output of the next echo in a command substitution. Because of these problems, do not pass a string containing arbitrary characters to ‘echo’. For example, ‘echo "$foo"’ is safe only if you know that FOO’s value cannot contain backslashes and cannot start with ‘-’. Normally, ‘printf’ is safer and easier to use than ‘echo’ and ‘echo -n’. 
Thus, you should use ‘printf "%s\n"’ instead of ‘echo’, and similarly use ‘printf %s’ instead of ‘echo -n’. Older scripts, written before ‘printf’ was portable, sometimes used a here-document as a safer alternative to ‘echo’, like this: cat <1 | head -n1 sh: syntax error at line 1: `;' unexpected $ make bad list='a b' a b $ make good $ make good list='a b' a b In Solaris 10 ‘/bin/sh’, when the list of arguments of a ‘for’ loop starts with _unquoted_ tokens looking like variable assignments, the loop is not executed on those tokens: $ /bin/sh -c 'for v in a=b c=d x e=f; do echo $v; done' x e=f Thankfully, quoting the assignment-like tokens, or starting the list with other tokens (including unquoted variable expansion that results in an assignment-like result), avoids the problem, so it is easy to work around: $ /bin/sh -c 'for v in "a=b"; do echo $v; done' a=b $ /bin/sh -c 'x=a=b; for v in $x c=d; do echo $v; done' a=b c=d ‘if’ Using ‘!’ is not portable. Instead of: if ! cmp -s file file.new; then mv file.new file fi use: if cmp -s file file.new; then :; else mv file.new file fi Or, especially if the “else” branch is short, you can use ‘||’. In M4sh, the ‘AS_IF’ macro provides an easy way to write these kinds of conditionals: AS_IF([cmp -s file file.new], [], [mv file.new file]) This is especially useful in other M4 macros, where the “then” and “else” branches might be macro arguments. Some very old shells did not reset the exit status from an ‘if’ with no ‘else’: $ if (exit 42); then true; fi; echo $? 42 whereas a proper shell should have printed ‘0’. But this is no longer a portability problem; any shell that supports functions gets it correct. However, it explains why some makefiles have lengthy constructs: if test -f "$file"; then install "$file" "$dest" else : fi ‘printf’ A format string starting with a ‘-’ can cause problems. Bash interprets it as an option and gives an error. 
And ‘--’ to mark the end of options is not good in the NetBSD Almquist shell (e.g., 0.4.6) which takes that literally as the format string. Putting the ‘-’ in a ‘%c’ or ‘%s’ is probably easiest: printf %s -foo AIX 7.2 ‘sh’ mishandles octal escapes in multi-byte locales by treating them as characters instead of bytes. For example, in a locale using the UTF-8 encoding, ‘printf '\351'’ outputs the two bytes C3, A9 (the UTF-8 encoding for U+00E9) instead of the desired single byte E9. To work around the bug, use the C locale. Bash 2.03 mishandles an escape sequence that happens to evaluate to ‘%’: $ printf '\045' bash: printf: `%': missing format character Large outputs may cause trouble. On Solaris 2.5.1 through 10, for example, ‘/usr/bin/printf’ is buggy, so when using ‘/bin/sh’ the command ‘printf %010000x 123’ normally dumps core. Since ‘printf’ is not always a shell builtin, there is a potential speed penalty for using ‘printf '%s\n'’ as a replacement for an ‘echo’ that does not interpret ‘\’ or leading ‘-’. With Solaris ‘ksh’, it is possible to use ‘print -r --’ for this role instead. *Note Limitations of Shell Builtins: echo, for a discussion of portable alternatives to both ‘printf’ and ‘echo’. ‘pwd’ With modern shells, plain ‘pwd’ outputs a “logical” directory name, some of whose components may be symbolic links. These directory names are in contrast to “physical” directory names, whose components are all directories. Posix 1003.1-2001 requires that ‘pwd’ must support the ‘-L’ (“logical”) and ‘-P’ (“physical”) options, with ‘-L’ being the default. However, traditional shells do not support these options, and their ‘pwd’ command has the ‘-P’ behavior. Portable scripts should assume neither option is supported, and should assume neither behavior is the default. Also, on many hosts ‘/bin/pwd’ is equivalent to ‘pwd -P’, but Posix does not require this behavior and portable scripts should not rely on it. Typically it’s best to use plain ‘pwd’. 
On modern hosts this outputs logical directory names, which have the following advantages: • Logical names are what the user specified. • Physical names may not be portable from one installation host to another due to network file system gymnastics. • On modern hosts ‘pwd -P’ may fail due to lack of permissions to some parent directory, but plain ‘pwd’ cannot fail for this reason. Also please see the discussion of the ‘cd’ command. ‘read’ No options are portable, not even support ‘-r’ (Solaris 10 ‘/bin/sh’ for example). Tru64/OSF 5.1 ‘sh’ treats ‘read’ as a special built-in, so it may exit if input is redirected from a non-existent or unreadable file. ‘set’ With the FreeBSD 6.0 shell, the ‘set’ command (without any options) does not sort its output. The ‘set’ builtin faces the usual problem with arguments starting with a dash. Modern shells such as Bash or Zsh understand ‘--’ to specify the end of the options (any argument after ‘--’ is a parameter, even ‘-x’ for instance), but many traditional shells (e.g., Solaris 10 ‘/bin/sh’) simply stop option processing as soon as a non-option argument is found. Therefore, use ‘dummy’ or simply ‘x’ to end the option processing, and use ‘shift’ to pop it out: set x $my_list; shift Avoid ‘set -’, e.g., ‘set - $my_list’. Posix no longer requires support for this command, and in traditional shells ‘set - $my_list’ resets the ‘-v’ and ‘-x’ options, which makes scripts harder to debug. Some nonstandard shells do not recognize more than one option (e.g., ‘set -e -x’ assigns ‘-x’ to the command line). It is better to combine them: set -ex The ‘-e’ option has historically been under-specified, with enough ambiguities to cause numerous differences across various shell implementations; see for example this overview (https://www.in-ulm.de/~mascheck/various/set-e/), or this link (https://www.austingroupbugs.net/view.php?id=52), documenting a change to Posix 2008 to match ‘ksh88’ behavior. 
Note that mixing ‘set -e’ and shell functions is asking for surprises: set -e doit() { rm file echo one } doit || echo two According to the recommendation, ‘one’ should always be output regardless of whether the ‘rm’ failed, because it occurs within the body of the shell function ‘doit’ invoked on the left side of ‘||’, where the effects of ‘set -e’ are not enforced. Likewise, ‘two’ should never be printed, since the failure of ‘rm’ does not abort the function, such that the status of ‘doit’ is 0. The BSD shell has had several problems with the ‘-e’ option. Older versions of the BSD shell (circa 1990) mishandled ‘&&’, ‘||’, ‘if’, and ‘case’ when ‘-e’ was in effect, causing the shell to exit unexpectedly in some cases. This was particularly a problem with makefiles, and led to circumlocutions like ‘sh -c 'test -f file || touch file'’, where the seemingly-unnecessary ‘sh -c '...'’ wrapper works around the bug (*note Failure in Make Rules::). Even relatively-recent versions of the BSD shell (e.g., OpenBSD 3.4) wrongly exit with ‘-e’ if the last command within a compound statement fails and is guarded by an ‘&&’ only. For example: #! /bin/sh set -e foo='' test -n "$foo" && exit 1 echo one if :; then test -n "$foo" && exit 1 echo two test -n "$foo" && exit 1 fi echo three does not print ‘three’. One workaround is to change the last instance of ‘test -n "$foo" && exit 1’ to be ‘if test -n "$foo"; then exit 1; fi’ instead. Another possibility is to warn BSD users not to use ‘sh -e’. When ‘set -e’ is in effect, a failed command substitution in Solaris 10 ‘/bin/sh’ cannot be ignored, even with ‘||’. 
$ /bin/sh -c 'set -e; foo=`false` || echo foo; echo bar' $ bash -c 'set -e; foo=`false` || echo foo; echo bar' foo bar Moreover, a command substitution, successful or not, causes this shell to exit from a failing outer command even in presence of an ‘&&’ list: $ bash -c 'set -e; false `true` && echo notreached; echo ok' ok $ sh -c 'set -e; false `true` && echo notreached; echo ok' $ Portable scripts should not use ‘set -e’ if ‘trap’ is used to install an exit handler. This is because Tru64/OSF 5.1 ‘sh’ sometimes enters the trap handler with the exit status of the command prior to the one that triggered the errexit handler: $ sh -ec 'trap '\''echo $?'\'' 0; false' 0 $ sh -c 'set -e; trap '\''echo $?'\'' 0; false' 1 Thus, when writing a script in M4sh, rather than trying to rely on ‘set -e’, it is better to append ‘|| AS_EXIT’ to any statement where it is desirable to abort on failure. Job control is not provided by all shells, so the use of ‘set -m’ or ‘set -b’ must be done with care. When using ‘zsh’ in native mode, asynchronous notification (‘set -b’) is enabled by default, and using ‘emulate sh’ to switch to Posix mode does not clear this setting (although asynchronous notification has no impact unless job monitoring is also enabled). Also, ‘zsh’ 4.3.10 and earlier have a bug where job control can be manipulated in interactive shells, but not in subshells or scripts. Furthermore, some shells, like ‘pdksh’, fail to treat subshells as interactive, even though the parent shell was. $ echo $ZSH_VERSION 4.3.10 $ set -m; echo $? 0 $ zsh -c 'set -m; echo $?' set: can't change option: -m $ (set -m); echo $? set: can't change option: -m 1 $ pdksh -ci 'echo $-; (echo $-)' cim c Use of ‘set -n’ (typically via ‘sh -n script’) to validate a script is not foolproof. 
Modern ‘ksh93’ tries to be helpful by informing you about better syntax, but switching the script to use the suggested syntax in order to silence the warnings would render the script no longer portable to older shells: $ ksh -nc '``' ksh: warning: line 1: `...` obsolete, use $(...) 0 Furthermore, on ancient hosts, such as SunOS 4, ‘sh -n’ could go into an infinite loop; even with that bug fixed, Solaris 8 ‘/bin/sh’ takes extremely long to parse large scripts. Autoconf itself uses ‘sh -n’ within its testsuite to check that correct scripts were generated, but only after first probing for other shell features (such as ‘test ${BASH_VERSION+y}’) that indicate a reasonably fast and working implementation. ‘shift’ Not only is ‘shift’ing a bad idea when there is nothing left to shift, but in addition it is not portable: the shell of MIPS RISC/OS 4.52 refuses to do it. Don’t use ‘shift 2’ etc.; while it was in the SVR1 shell (1983), it is also absent in many pre-Posix shells. ‘source’ This command is not portable, as Posix does not require it; use ‘.’ instead. ‘test’ The ‘test’ program is the way to perform many file and string tests. It is often invoked by the alternate name ‘[’, but using that name in Autoconf code is asking for trouble since it is an M4 quote character. The ‘-a’, ‘-o’, ‘(’, and ‘)’ operands are not present in all implementations, and have been marked obsolete by Posix 2008. This is because there are inherent ambiguities in using them. For example, ‘test "$1" -a "$2"’ looks like a binary operator to check whether two strings are both non-empty, but if ‘$1’ is the literal ‘!’, then some implementations of ‘test’ treat it as a negation of the unary operator ‘-a’. Thus, portable uses of ‘test’ should never have more than four arguments, and scripts should use shell constructs like ‘&&’ and ‘||’ instead. 
If you combine ‘&&’ and ‘||’ in the same statement, keep in mind that they have equal precedence, so it is often better to parenthesize even when this is redundant. For example: # Not portable: test "X$a" = "X$b" -a \ '(' "X$c" != "X$d" -o "X$e" = "X$f" ')' # Portable: test "X$a" = "X$b" && { test "X$c" != "X$d" || test "X$e" = "X$f"; } ‘test’ does not process options like most other commands do; for example, it does not recognize the ‘--’ argument as marking the end of options. It is safe to use ‘!’ as a ‘test’ operator. For example, ‘if test ! -d foo; ...’ is portable even though ‘if ! test -d foo; ...’ is not. ‘test’ (files) To enable ‘configure’ scripts to support cross-compilation, they shouldn’t do anything that tests features of the build system instead of the host system. But occasionally you may find it necessary to check whether some arbitrary file exists. To do so, use ‘test -f’, ‘test -r’, or ‘test -x’. Do not use ‘test -e’, because Solaris 10 ‘/bin/sh’ lacks it. To test for symbolic links on systems that have them, use ‘test -h’ rather than ‘test -L’; either form conforms to Posix 1003.1-2001, but older shells like Solaris 8 ‘/bin/sh’ support only ‘-h’. For historical reasons, Posix reluctantly allows implementations of ‘test -x’ that will succeed for the root user, even if no execute permissions are present. Furthermore, shells do not all agree on whether Access Control Lists should affect ‘test -r’, ‘test -w’, and ‘test -x’; some shells base test results strictly on the current user id compared to file owner and mode, as if by ‘stat(2)’; while other shells base test results on whether the current user has the given right, even if that right is only granted by an ACL, as if by ‘faccessat(2)’. Furthermore, there is a classic time of check to time of use race between any use of ‘test’ followed by operating on the just-checked file. 
Therefore, it is a good idea to write scripts that actually attempt an operation, and are prepared for the resulting failure if permission is denied, rather than trying to avoid an operation based solely on whether ‘test’ guessed that it might not be permitted. ‘test’ (strings) Posix says that ‘test "STRING"’ succeeds if STRING is not null, but this usage is not portable to traditional platforms like Solaris 10 ‘/bin/sh’, which mishandle strings like ‘!’ and ‘-n’. However, it _is_ portable to test if a variable is set to a non-empty value, by using ‘test ${var+y}’, since all known implementations properly distinguish between no arguments and a known-safe string of ‘y’. Posix also says that ‘test ! "STRING"’, ‘test -n "STRING"’ and ‘test -z "STRING"’ work with any string, but many shells (such as Solaris 10, AIX 3.2, UNICOS 10.0.0.6, Digital Unix 4, etc.) get confused if STRING looks like an operator: $ test -n = test: argument expected $ test ! -n test: argument expected $ test -z ")"; echo $? 0 Similarly, Posix says that both ‘test "STRING1" = "STRING2"’ and ‘test "STRING1" != "STRING2"’ work for any pairs of strings, but in practice this is not true for troublesome strings that look like operators or parentheses, or that begin with ‘-’. It is best to protect such strings with a leading ‘X’, e.g., ‘test "XSTRING" != X’ rather than ‘test -n "STRING"’ or ‘test ! "STRING"’. It is common to find variations of the following idiom: test -n "`echo $ac_feature | sed 's/[-a-zA-Z0-9_]//g'`" && ACTION to take an action when a token matches a given pattern. Such constructs should be avoided by using: case $ac_feature in *[!-a-zA-Z0-9_]*) ACTION;; esac If the pattern is a complicated regular expression that cannot be expressed as a shell pattern, use something like this instead: expr "X$ac_feature" : 'X.*[^-a-zA-Z0-9_]' >/dev/null && ACTION ‘expr "XFOO" : "XBAR"’ is more robust than ‘echo "XFOO" | grep "^XBAR"’, because it avoids problems when ‘FOO’ contains backslashes. 
‘trap’ It is safe to trap at least the signals 1, 2, 13, and 15. You can also trap 0, i.e., have the ‘trap’ run when the script ends (either via an explicit ‘exit’, or the end of the script). The trap for 0 should be installed outside of a shell function, or AIX 5.3 ‘/bin/sh’ will invoke the trap at the end of this function. Posix says that ‘trap - 1 2 13 15’ resets the traps for the specified signals to their default values, but many common shells (e.g., Solaris 10 ‘/bin/sh’) misinterpret this and attempt to execute a “command” named ‘-’ when the specified conditions arise. Posix 2008 also added a requirement to support ‘trap 1 2 13 15’ to reset traps, as this is supported by a larger set of shells, but there are still shells like ‘dash’ that mistakenly try to execute ‘1’ instead of resetting the traps. Therefore, there is no portable workaround, except for ‘trap - 0’, for which ‘trap '' 0’ is a portable substitute. Although Posix is not absolutely clear on this point, it is widely admitted that when entering the trap ‘$?’ should be set to the exit status of the last command run before the trap. The ambiguity can be summarized as: “when the trap is launched by an ‘exit’, what is the _last_ command run: that before ‘exit’, or ‘exit’ itself?” Bash considers ‘exit’ to be the last command, while Zsh and Solaris 10 ‘/bin/sh’ consider that when the trap is run it is _still_ in the ‘exit’, hence it is the previous exit status that the trap receives: $ cat trap.sh trap 'echo $?' 0 (exit 42); exit 0 $ zsh trap.sh 42 $ bash trap.sh 0 The portable solution is then simple: when you want to ‘exit 42’, run ‘(exit 42); exit 42’, the first ‘exit’ being used to set the exit status to 42 for Zsh, and the second to trigger the trap and pass 42 as exit status for Bash. In M4sh, this is covered by using ‘AS_EXIT’. The shell in FreeBSD 4.0 has the following bug: ‘$?’ is reset to 0 by empty lines if the code is inside ‘trap’. $ trap 'false echo $?' 
0 $ exit 0 Fortunately, this bug only affects ‘trap’. Several shells fail to execute an exit trap that is defined inside a subshell, when the last command of that subshell is not a builtin. A workaround is to use ‘exit $?’ as the shell builtin. $ bash -c '(trap "echo hi" 0; /bin/true)' hi $ /bin/sh -c '(trap "echo hi" 0; /bin/true)' $ /bin/sh -c '(trap "echo hi" 0; /bin/true; exit $?)' hi Likewise, older implementations of ‘bash’ failed to preserve ‘$?’ across an exit trap consisting of a single cleanup command. $ bash -c 'trap "/bin/true" 0; exit 2'; echo $? 2 $ bash-2.05b -c 'trap "/bin/true" 0; exit 2'; echo $? 0 $ bash-2.05b -c 'trap ":; /bin/true" 0; exit 2'; echo $? 2 Be aware that a trap can be called from any number of places in your script, and therefore the trap handler should not make assumptions about shell state. For some examples, if your script temporarily modifies ‘IFS’, then the trap should include an initialization back to its typical value of space-tab-newline (autoconf does this for generated ‘configure’ files). Likewise, if your script changes the current working directory at some point after the trap is installed, then your trap cannot assume which directory it is in, and should begin by changing directories to an absolute path if that is important to the cleanup efforts (autotest does this for generated ‘testsuite’ files). ‘true’ Don’t worry: as far as we know ‘true’ is portable. Nevertheless, it’s not always a builtin (e.g., Bash 1.x), and the portable shell community tends to prefer using ‘:’. This has a funny side effect: when asked whether ‘false’ is more portable than ‘true’ Alexandre Oliva answered: In a sense, yes, because if it doesn’t exist, the shell will produce an exit status of failure, which is correct for ‘false’, but not for ‘true’. Remember that even though ‘:’ ignores its arguments, it still takes time to compute those arguments. 
It is a good idea to use double quotes around any arguments to ‘:’ to avoid time spent in field splitting and file name expansion. ‘unset’ In some nonconforming shells (e.g., Solaris 10 ‘/bin/ksh’ and ‘/usr/xpg4/bin/sh’, NetBSD 5.99.43 sh, or Bash 2.05a), ‘unset FOO’ fails when ‘FOO’ is not set. This can interfere with ‘set -e’ operation. You can use FOO=; unset FOO if you are not sure that ‘FOO’ is set. A few ancient shells lack ‘unset’ entirely. For some variables such as ‘PS1’, you can use a neutralizing value instead: PS1='$ ' Usually, shells that do not support ‘unset’ need less effort to make the environment sane, so for example is not a problem if you cannot unset ‘CDPATH’ on those shells. However, Bash 2.01 mishandles ‘unset MAIL’ and ‘unset MAILPATH’ in some cases and dumps core. So, you should do something like ( (unset MAIL) || exit 1) >/dev/null 2>&1 && unset MAIL || : *Note Special Shell Variables::, for some neutralizing values. Also, see *note Limitations of Builtins: export, for the case of environment variables. ‘wait’ The exit status of ‘wait’ is not always reliable.  File: autoconf.info, Node: Limitations of Usual Tools, Prev: Limitations of Builtins, Up: Portable Shell 11.15 Limitations of Usual Tools ================================ The small set of tools you can expect to find on any machine can still include some limitations you should be aware of. ‘awk’ Don’t leave white space before the opening parenthesis in a user function call. Posix does not allow this and GNU Awk rejects it: $ gawk 'function die () { print "Aaaaarg!" } BEGIN { die () }' gawk: cmd. line:2: BEGIN { die () } gawk: cmd. line:2: ^ parse error $ gawk 'function die () { print "Aaaaarg!" } BEGIN { die() }' Aaaaarg! Posix says that if a program contains only ‘BEGIN’ actions, and contains no instances of ‘getline’, then the program merely executes the actions without reading input. 
However, traditional Awk implementations (such as Solaris 10 ‘awk’) read and discard input in this case. Portable scripts can redirect input from ‘/dev/null’ to work around the problem. For example: awk 'BEGIN {print "hello world"}' printf "foo\n|foo\n" | $EGREP '^(|foo|bar)$' |foo > printf "bar\nbar|\n" | $EGREP '^(foo|bar|)$' bar| > printf "foo\nfoo|\n|bar\nbar\n" | $EGREP '^(foo||bar)$' foo |bar ‘$EGREP’ also suffers the limitations of ‘grep’ (*note Limitations of Usual Tools: grep.). ‘expr’ Not all implementations obey the Posix rule that ‘--’ separates options from arguments; likewise, not all implementations provide the extension to Posix that the first argument can be treated as part of a valid expression rather than an invalid option if it begins with ‘-’. When performing arithmetic, use ‘expr 0 + $var’ if ‘$var’ might be a negative number, to keep ‘expr’ from interpreting it as an option. No ‘expr’ keyword starts with ‘X’, so use ‘expr X"WORD" : 'XREGEX'’ to keep ‘expr’ from misinterpreting WORD. Don’t use ‘length’, ‘substr’, ‘match’ and ‘index’. ‘expr’ (‘|’) You can use ‘|’. Although Posix does require that ‘expr ''’ return the empty string, it does not specify the result when you ‘|’ together the empty string (or zero) with the empty string. For example: expr '' \| '' Posix 1003.2-1992 returns the empty string for this case, but traditional Unix returns ‘0’ (Solaris is one such example). In Posix 1003.1-2001, the specification was changed to match traditional Unix’s behavior (which is bizarre, but it’s too late to fix this). Please note that the same problem does arise when the empty string results from a computation, as in: expr bar : foo \| foo : bar Avoid this portability problem by avoiding the empty string. ‘expr’ (‘:’) Portable ‘expr’ regular expressions should use ‘\’ to escape only characters in the string ‘$()*.0123456789[\^n{}’. 
For example, alternation, ‘\|’, is common but Posix does not require its support, so it should be avoided in portable scripts. Similarly, ‘\+’ and ‘\?’ should be avoided. Portable ‘expr’ regular expressions should not begin with ‘^’. Patterns are automatically anchored so leading ‘^’ is not needed anyway. On the other hand, the behavior of the ‘$’ anchor is not portable on multi-line strings. Posix is ambiguous whether the anchor applies to each line, as was done in older versions of the GNU Core Utilities, or whether it applies only to the end of the overall string, as in Coreutils 6.0 and most other implementations. $ baz='foo > bar' $ expr "X$baz" : 'X\(foo\)$' $ expr-5.97 "X$baz" : 'X\(foo\)$' foo The Posix standard is ambiguous as to whether ‘expr 'a' : '\(b\)'’ outputs ‘0’ or the empty string. In practice, it outputs the empty string on most platforms, but portable scripts should not assume this. For instance, the QNX 4.25 native ‘expr’ returns ‘0’. One might think that a way to get a uniform behavior would be to use the empty string as a default value: expr a : '\(b\)' \| '' Unfortunately this behaves exactly as the original expression; see the ‘expr’ (‘|’) entry for more information. Some ancient ‘expr’ implementations (e.g., SunOS 4 ‘expr’ and Solaris 8 ‘/usr/ucb/expr’) have a silly length limit that causes ‘expr’ to fail if the matched substring is longer than 120 bytes. In this case, you might want to fall back on ‘echo|sed’ if ‘expr’ fails. Nowadays this is of practical importance only for the rare installer who mistakenly puts ‘/usr/ucb’ before ‘/usr/bin’ in ‘PATH’. On Mac OS X 10.4, ‘expr’ mishandles the pattern ‘[^-]’ in some cases. For example, the command expr Xpowerpc-apple-darwin8.1.0 : 'X[^-]*-[^-]*-\(.*\)' outputs ‘apple-darwin8.1.0’ rather than the correct ‘darwin8.1.0’. This particular case can be worked around by substituting ‘[^--]’ for ‘[^-]’. Don’t leave, there is some more! 
The QNX 4.25 ‘expr’, in addition to preferring ‘0’ to the empty string, has a funny behavior in its exit status: it’s always 1 when parentheses are used! $ val=`expr 'a' : 'a'`; echo "$?: $val" 0: 1 $ val=`expr 'a' : 'b'`; echo "$?: $val" 1: 0 $ val=`expr 'a' : '\(a\)'`; echo "$?: $val" 1: a $ val=`expr 'a' : '\(b\)'`; echo "$?: $val" 1: 0 In practice this can be a big problem if you are ready to catch failures of ‘expr’ programs with some other method (such as using ‘sed’), since you may get twice the result. For instance $ expr 'a' : '\(a\)' || echo 'a' | sed 's/^\(a\)$/\1/' outputs ‘a’ on most hosts, but ‘aa’ on QNX 4.25. A simple workaround consists of testing ‘expr’ and using a variable set to ‘expr’ or to ‘false’ according to the result. Tru64 ‘expr’ incorrectly treats the result as a number, if it can be interpreted that way: $ expr 00001 : '.*\(...\)' 1 On HP-UX 11, ‘expr’ only supports a single sub-expression. $ expr 'Xfoo' : 'X\(f\(oo\)*\)$' expr: More than one '\(' was used. ‘fgrep’ Although Posix stopped requiring ‘fgrep’ in 2001, a few traditional hosts (notably Solaris) do not support the Posix replacement ‘grep -F’. Also, some traditional implementations do not work on long input lines. To work around these problems, invoke ‘AC_PROG_FGREP’ and then use ‘$FGREP’. Tru64/OSF 5.1 ‘fgrep’ does not match an empty pattern. ‘find’ The ‘-maxdepth’ option seems to be GNU specific. Tru64 v5.1, NetBSD 1.5 and Solaris ‘find’ commands do not understand it. The replacement of ‘{}’ is guaranteed only if the argument is exactly _{}_, not if it’s only a part of an argument. For instance on DU, and HP-UX 10.20 and HP-UX 11: $ touch foo $ find . -name foo -exec echo "{}-{}" \; {}-{} while GNU ‘find’ reports ‘./foo-./foo’. ‘grep’ Portable scripts can rely on the ‘grep’ options ‘-c’, ‘-l’, ‘-n’, and ‘-v’, but should avoid other options. For example, don’t use ‘-w’, as Posix does not require it and Irix 6.5.16m’s ‘grep’ does not support it. 
Also, portable scripts should not combine ‘-c’ with ‘-l’, as Posix does not allow this. Some of the options required by Posix are not portable in practice. Don’t use ‘grep -q’ to suppress output, because traditional ‘grep’ implementations (e.g., Solaris) do not support ‘-q’. Don’t use ‘grep -s’ to suppress output either, because Posix says ‘-s’ does not suppress output, only some error messages; also, the ‘-s’ option of traditional ‘grep’ behaved like ‘-q’ does in most modern implementations. Instead, redirect the standard output and standard error (in case the file doesn’t exist) of ‘grep’ to ‘/dev/null’. Check the exit status of ‘grep’ to determine whether it found a match. The QNX4 implementation fails to count lines with ‘grep -c '$'’, but works with ‘grep -c '^'’. Other alternatives for counting lines are to use ‘sed -n '$='’ or ‘wc -l’. Some traditional ‘grep’ implementations do not work on long input lines. On AIX the default ‘grep’ silently truncates long lines on the input before matching. Also, traditional implementations do not support multiple regexps with ‘-e’: they either reject ‘-e’ entirely (e.g., Solaris) or honor only the last pattern (e.g., IRIX 6.5 and NeXT). To work around these problems, invoke ‘AC_PROG_GREP’ and then use ‘$GREP’. Another possible workaround for the multiple ‘-e’ problem is to separate the patterns by newlines, for example: grep 'foo bar' in.txt except that this fails with traditional ‘grep’ implementations and with OpenBSD 3.8 ‘grep’. Traditional ‘grep’ implementations (e.g., Solaris) do not support the ‘-E’ or ‘-F’ options. To work around these problems, invoke ‘AC_PROG_EGREP’ and then use ‘$EGREP’, and similarly for ‘AC_PROG_FGREP’ and ‘$FGREP’. Even if you are willing to require support for Posix ‘grep’, your script should not use both ‘-E’ and ‘-F’, since Posix does not allow this combination. Portable ‘grep’ regular expressions should use ‘\’ only to escape characters in the string ‘$()*.0123456789[\^{}’. 
For example, alternation, ‘\|’, is common but Posix does not require its support in basic regular expressions, so it should be avoided in portable scripts. Solaris and HP-UX ‘grep’ do not support it. Similarly, the following escape sequences should also be avoided: ‘\<’, ‘\>’, ‘\+’, ‘\?’, ‘\`’, ‘\'’, ‘\B’, ‘\b’, ‘\S’, ‘\s’, ‘\W’, and ‘\w’. Posix does not specify the behavior of ‘grep’ on binary files. An example where this matters is using BSD ‘grep’ to search text that includes embedded ANSI escape sequences for colored output to terminals (‘\033[m’ is the sequence to restore normal output); the behavior depends on whether input is seekable: $ printf 'esc\033[mape\n' > sample $ grep . sample Binary file sample matches $ cat sample | grep . escape ‘join’ Solaris 8 ‘join’ has bugs when the second operand is standard input, and when standard input is a pipe. For example, the following shell script causes Solaris 8 ‘join’ to loop forever: cat >file <<'EOF' 1 x 2 y EOF cat file | join file - Use ‘join - file’ instead. On NetBSD, ‘join -a 1 file1 file2’ mistakenly behaves like ‘join -a 1 -a 2 1 file1 file2’, resulting in a usage warning; the workaround is to use ‘join -a1 file1 file2’ instead. ‘ln’ The ‘-f’ option is portable nowadays. Symbolic links are not available on some systems; use ‘$(LN_S)’ as a portable substitute. For versions of the DJGPP before 2.04, ‘ln’ emulates symbolic links to executables by generating a stub that in turn calls the real program. This feature also works with nonexistent files like in the Posix spec. So ‘ln -s file link’ generates ‘link.exe’, which attempts to call ‘file.exe’ if run. But this feature only works for executables, so ‘cp -p’ is used instead for these systems. DJGPP versions 2.04 and later have full support for symbolic links. ‘ls’ The portable options are ‘-acdilrtu’. Current practice is for ‘-l’ to output both owner and group, even though ancient versions of ‘ls’ omitted the group. 
On ancient hosts, ‘ls foo’ sent the diagnostic ‘foo not found’ to standard output if ‘foo’ did not exist. Hence a shell command like ‘sources=`ls *.c 2>/dev/null`’ did not always work, since it was equivalent to ‘sources='*.c not found'’ in the absence of ‘.c’ files. This is no longer a practical problem, since current ‘ls’ implementations send diagnostics to standard error. The behavior of ‘ls’ on a directory that is being concurrently modified is not always predictable, because of a data race where cached information returned by ‘readdir’ does not match the current directory state. In fact, MacOS 10.5 has an intermittent bug where ‘readdir’, and thus ‘ls’, sometimes lists a file more than once if other files were added or removed from the directory immediately prior to the ‘ls’ call. Since ‘ls’ already sorts its output, the duplicate entries can be avoided by piping the results through ‘uniq’. ‘mkdir’ No ‘mkdir’ option is portable to older systems. Instead of ‘mkdir -p FILE-NAME’, you should use ‘AS_MKDIR_P(FILE-NAME)’ (*note Programming in M4sh::) or ‘AC_PROG_MKDIR_P’ (*note Particular Programs::). Combining the ‘-m’ and ‘-p’ options, as in ‘mkdir -m go-w -p DIR’, often leads to trouble. FreeBSD ‘mkdir’ incorrectly attempts to change the permissions of DIR even if it already exists. HP-UX 11.23 and IRIX 6.5 ‘mkdir’ often assign the wrong permissions to any newly-created parents of DIR. Posix does not clearly specify whether ‘mkdir -p foo’ should succeed when ‘foo’ is a symbolic link to an already-existing directory. The GNU Core Utilities 5.1.0 ‘mkdir’ succeeds, but Solaris ‘mkdir’ fails. Traditional ‘mkdir -p’ implementations suffer from race conditions. For example, if you invoke ‘mkdir -p a/b’ and ‘mkdir -p a/c’ at the same time, both processes might detect that ‘a’ is missing, one might create ‘a’, then the other might try to create ‘a’ and fail with a ‘File exists’ diagnostic. 
The GNU Core Utilities (‘fileutils’ version 4.1), FreeBSD 5.0, NetBSD 2.0.2, and OpenBSD 2.4 are known to be race-free when two processes invoke ‘mkdir -p’ simultaneously, but earlier versions are vulnerable. Solaris ‘mkdir’ is still vulnerable as of Solaris 10, and other traditional Unix systems are probably vulnerable too. This possible race is harmful in parallel builds when several Make rules call ‘mkdir -p’ to construct directories. You may use ‘install-sh -d’ as a safe replacement, provided this script is recent enough; the copy shipped with Autoconf 2.60 and Automake 1.10 is OK, but copies from older versions are vulnerable. ‘mkfifo’ ‘mknod’ The GNU Coding Standards state that ‘mknod’ is safe to use on platforms where it has been tested to exist; but it is generally portable only for creating named FIFOs, since device numbers are platform-specific. Autotest uses ‘mkfifo’ to implement parallel testsuites. Posix states that behavior is unspecified when opening a named FIFO for both reading and writing; on at least Cygwin, this results in failure on any attempt to read or write to that file descriptor. ‘mktemp’ Shell scripts can use temporary files safely with ‘mktemp’, but it does not exist on all systems. A portable way to create a safe temporary file name is to create a temporary directory with mode 700 and use a file inside this directory. Both methods prevent attackers from gaining control, though ‘mktemp’ is far less likely to fail gratuitously under attack. Here is sample code to create a new temporary directory ‘$dir’ safely: # Create a temporary directory $dir in $TMPDIR (default /tmp). # Use mktemp if possible; otherwise fall back on mkdir, # with $RANDOM to make collisions less likely. : "${TMPDIR:=/tmp}" { dir=` (umask 077 && mktemp -d "$TMPDIR/fooXXXXXX") 2>/dev/null ` && test -d "$dir" } || { dir=$TMPDIR/foo$$-$RANDOM (umask 077 && mkdir "$dir") } || exit $? ‘mv’ The only portable options are ‘-f’ and ‘-i’. 
Moving individual files between file systems is portable (it was in Unix version 6), but it is not always atomic: when doing ‘mv new existing’, there’s a critical section where neither the old nor the new version of ‘existing’ actually exists. On some systems moving files from ‘/tmp’ can sometimes cause undesirable (but perfectly valid) warnings, even if you created these files. This is because ‘/tmp’ belongs to a group that ordinary users are not members of, and files created in ‘/tmp’ inherit the group of ‘/tmp’. When the file is copied, ‘mv’ issues a diagnostic without failing: $ touch /tmp/foo $ mv /tmp/foo . error→mv: ./foo: set owner/group (was: 100/0): Operation not permitted $ echo $? 0 $ ls foo foo This annoying behavior conforms to Posix, unfortunately. Moving directories across mount points is not portable, use ‘cp’ and ‘rm’. DOS variants cannot rename or remove open files, and do not support commands like ‘mv foo bar >foo’, even though this is perfectly portable among Posix hosts. ‘od’ In MacOS X versions prior to 10.4.3, ‘od’ does not support the standard Posix options ‘-A’, ‘-j’, ‘-N’, or ‘-t’, or the XSI option, ‘-s’. The only supported Posix option is ‘-v’, and the only supported XSI options are those in ‘-bcdox’. The BSD ‘hexdump’ program can be used instead. In some versions of some operating systems derived from Solaris 11, ‘od’ prints decimal byte values padded with zeroes rather than with spaces: $ printf '#!' | od -A n -t d1 -N 2 035 033 instead of $ printf '#!' | od -A n -t d1 -N 2 35 33 We have observed this on both OpenIndiana and OmniOS; Illumos may also be affected. As a workaround, you can use octal output (option ‘-t o1’). ‘rm’ The ‘-f’ and ‘-r’ options are portable. It is not portable to invoke ‘rm’ without options or operands. On the other hand, Posix now requires ‘rm -f’ to silently succeed when there are no operands (useful for constructs like ‘rm -rf $filelist’ without first checking if ‘$filelist’ was empty). 
But this was not always portable; at least NetBSD ‘rm’ built before 2008 would fail with a diagnostic. A file might not be removed even if its parent directory is writable and searchable. Many Posix hosts cannot remove a mount point, a named stream, a working directory, or a last link to a file that is being executed. DOS variants cannot rename or remove open files, and do not support commands like ‘rm foo >foo’, even though this is perfectly portable among Posix hosts. ‘rmdir’ Just as with ‘rm’, some platforms refuse to remove a working directory. ‘sed’ Patterns should not include the separator (unless escaped), even as part of a character class. In conformance with Posix, the Cray ‘sed’ rejects ‘s/[^/]*$//’: use ‘s%[^/]*$%%’. Even when escaped, patterns should not include separators that are also used as ‘sed’ metacharacters. For example, GNU sed 4.0.9 rejects ‘s,x\{1\,\},,’, while sed 4.1 strips the backslash before the comma before evaluating the basic regular expression. Avoid empty patterns within parentheses (i.e., ‘\(\)’). Posix does not require support for empty patterns, and Unicos 9 ‘sed’ rejects them. Unicos 9 ‘sed’ loops endlessly on patterns like ‘.*\n.*’. Sed scripts should not use branch labels longer than 7 characters and should not contain comments; AIX 5.3 ‘sed’ rejects indented comments. HP-UX sed has a limit of 99 commands (not counting ‘:’ commands) and 48 labels, which cannot be circumvented by using more than one script file. It can execute up to 19 reads with the ‘r’ command per cycle. Solaris ‘/usr/ucb/sed’ rejects usages that exceed a limit of about 6000 bytes for the internal representation of commands. 
Avoid redundant ‘;’, as some ‘sed’ implementations, such as NetBSD 1.4.2’s, incorrectly try to interpret the second ‘;’ as a command: $ echo a | sed 's/x/x/;;s/x/x/' sed: 1: "s/x/x/;;s/x/x/": invalid command code ; Some ‘sed’ implementations have a buffer limited to 4000 bytes, and this limits the size of input lines, output lines, and internal buffers that can be processed portably. Likewise, not all ‘sed’ implementations can handle embedded ‘NUL’ or a missing trailing newline. Remember that ranges within a bracket expression of a regular expression are only well-defined in the ‘C’ (or ‘POSIX’) locale. Meanwhile, support for character classes like ‘[[:upper:]]’ is not yet universal, so if you cannot guarantee the setting of ‘LC_ALL’, it is better to spell out a range ‘[ABCDEFGHIJKLMNOPQRSTUVWXYZ]’ than to rely on ‘[A-Z]’. Additionally, Posix states that regular expressions are only well-defined on characters. Unfortunately, there exist platforms such as MacOS X 10.5 where not all 8-bit byte values are valid characters, even though that platform has a single-byte ‘C’ locale. And Posix allows the existence of a multi-byte ‘C’ locale, although that does not yet appear to be a common implementation. At any rate, it means that not all bytes will be matched by the regular expression ‘.’: $ printf '\200\n' | LC_ALL=C sed -n /./p | wc -l 0 $ printf '\200\n' | LC_ALL=en_US.ISO8859-1 sed -n /./p | wc -l 1 Portable ‘sed’ regular expressions should use ‘\’ only to escape characters in the string ‘$()*.0123456789[\^n{}’. For example, alternation, ‘\|’, is common but Posix does not require its support, so it should be avoided in portable scripts. Solaris ‘sed’ does not support alternation; e.g., ‘sed '/a\|b/d'’ deletes only lines that contain the literal string ‘a|b’. Similarly, ‘\+’ and ‘\?’ should be avoided. Anchors (‘^’ and ‘$’) inside groups are not portable. 
Nested parentheses in patterns (e.g., ‘\(\(a*\)b*\)’) are quite portable to current hosts, but were not supported by some ancient ‘sed’ implementations like SVR3. Some ‘sed’ implementations, e.g., Solaris, restrict the special role of the asterisk ‘*’ to one-character regular expressions and back-references, and the special role of interval expressions ‘\{M\}’, ‘\{M,\}’, or ‘\{M,N\}’ to one-character regular expressions. This may lead to unexpected behavior: $ echo '1*23*4' | /usr/bin/sed 's/\(.\)*/x/g' x2x4 $ echo '1*23*4' | /usr/xpg4/bin/sed 's/\(.\)*/x/g' x The ‘-e’ option is mostly portable. However, its argument cannot start with ‘a’, ‘c’, or ‘i’, as this runs afoul of a Tru64 5.1 bug. Also, its argument cannot be empty, as this fails on AIX 5.3. Some people prefer to use ‘-e’: sed -e 'COMMAND-1' \ -e 'COMMAND-2' as opposed to the equivalent: sed ' COMMAND-1 COMMAND-2 ' The following usage is sometimes equivalent: sed 'COMMAND-1;COMMAND-2' but Posix says that this use of a semicolon has undefined effect if COMMAND-1’s verb is ‘{’, ‘a’, ‘b’, ‘c’, ‘i’, ‘r’, ‘t’, ‘w’, ‘:’, or ‘#’, so you should use semicolon only with simple scripts that do not use these verbs. Posix up to the 2008 revision requires the argument of the ‘-e’ option to be a syntactically complete script. GNU ‘sed’ allows passing multiple script fragments, each as argument of a separate ‘-e’ option, that are then combined, with newlines between the fragments, and a future Posix revision may allow this as well. 
This approach is not portable with script fragments ending in backslash; for example, the ‘sed’ programs on Solaris 10, HP-UX 11, and AIX don’t allow splitting in this case: $ echo a | sed -n -e 'i\ 0' 0 $ echo a | sed -n -e 'i\' -e 0 Unrecognized command: 0 In practice, however, this technique of joining fragments through ‘-e’ works for multiple ‘sed’ functions within ‘{’ and ‘}’, even if that is not specified by Posix: $ echo a | sed -n -e '/a/{' -e s/a/b/ -e p -e '}' b Commands inside { } brackets are further restricted. Posix 2008 says that they cannot be preceded by addresses, ‘!’, or ‘;’, and that each command must be followed immediately by a newline, without any intervening blanks or semicolons. The closing bracket must be alone on a line, other than white space preceding or following it. However, a future version of Posix may standardize the use of addresses within brackets. Contrary to yet another urban legend, you may portably use ‘&’ in the replacement part of the ‘s’ command to mean “what was matched”. All descendants of Unix version 7 ‘sed’ (at least; we don’t have first hand experience with older ‘sed’ implementations) have supported it. Posix requires that you must not have any white space between ‘!’ and the following command. It is OK to have blanks between the address and the ‘!’. For instance, on Solaris: $ echo "foo" | sed -n '/bar/ ! p' error→Unrecognized command: /bar/ ! p $ echo "foo" | sed -n '/bar/! p' error→Unrecognized command: /bar/! p $ echo "foo" | sed -n '/bar/ !p' foo Posix also says that you should not combine ‘!’ and ‘;’. If you use ‘!’, it is best to put it on a command that is delimited by newlines rather than ‘;’. Also note that Posix requires that the ‘b’, ‘t’, ‘r’, and ‘w’ commands be followed by exactly one space before their argument. On the other hand, no white space is allowed between ‘:’ and the subsequent label name. 
If a sed script is specified on the command line and ends in an ‘a’, ‘c’, or ‘i’ command, the last line of inserted text should be followed by a newline. Otherwise some ‘sed’ implementations (e.g., OpenBSD 3.9) do not append a newline to the inserted text. Many ‘sed’ implementations (e.g., MacOS X 10.4, OpenBSD 3.9, Solaris 10 ‘/usr/ucb/sed’) strip leading white space from the text of ‘a’, ‘c’, and ‘i’ commands. Prepend a backslash to work around this incompatibility with Posix: $ echo flushleft | sed 'a\ > indented > ' flushleft indented $ echo foo | sed 'a\ > \ indented > ' foo indented Posix requires that with an empty regular expression, the last non-empty regular expression from either an address specification or substitution command is applied. However, busybox 1.6.1 complains when using a substitution command with a replacement containing a back-reference to an empty regular expression; the workaround is repeating the regular expression. $ echo abc | busybox sed '/a\(b\)c/ s//\1/' sed: No previous regexp. $ echo abc | busybox sed '/a\(b\)c/ s/a\(b\)c/\1/' b Portable scripts should be aware of the inconsistencies and options for handling word boundaries, as these are not specified by POSIX. \< \b [[:<:]] Solaris 10 yes no no Solaris XPG4 yes no error NetBSD 5.1 no no yes FreeBSD 9.1 no no yes GNU yes yes error busybox yes yes error ‘sed’ (‘t’) Some old systems have ‘sed’ that “forget” to reset their ‘t’ flag when starting a new cycle. For instance on MIPS RISC/OS, and on IRIX 5.3, if you run the following ‘sed’ script (the line numbers are not an actual part of the text): s/keep me/kept/g # a t end # b s/.*/deleted/g # c :end # d on delete me # 1 delete me # 2 keep me # 3 delete me # 4 you get deleted delete me kept deleted instead of deleted deleted kept deleted Why? When processing line 1, (c) matches, therefore sets the ‘t’ flag, and the output is produced. When processing line 2, the ‘t’ flag is still set (this is the bug). 
Command (a) fails to match, but ‘sed’ is not supposed to clear the ‘t’ flag when a substitution fails. Command (b) sees that the flag is set, therefore it clears it, and jumps to (d), hence you get ‘delete me’ instead of ‘deleted’. When processing line (3), ‘t’ is clear, (a) matches, so the flag is set, hence (b) clears the flags and jumps. Finally, since the flag is clear, line 4 is processed properly. There are two things one should remember about ‘t’ in ‘sed’. Firstly, always remember that ‘t’ jumps if _some_ substitution succeeded, not only the immediately preceding substitution. Therefore, always use a fake ‘t clear’ followed by a ‘:clear’ on the next line, to reset the ‘t’ flag where needed. Secondly, you cannot rely on ‘sed’ to clear the flag at each new cycle. One portable implementation of the script above is: t clear :clear s/keep me/kept/g t end s/.*/deleted/g :end ‘sleep’ Using ‘sleep’ is generally portable. However, remember that adding a ‘sleep’ to work around timestamp issues, with a minimum granularity of one second, doesn’t scale well for parallel builds on modern machines with sub-second process completion. ‘sort’ Remember that sort order is influenced by the current locale. Inside ‘configure’, the C locale is in effect, but in Makefile snippets, you may need to specify ‘LC_ALL=C sort’. ‘tar’ There are multiple file formats for ‘tar’; if you use Automake, the macro ‘AM_INIT_AUTOMAKE’ has some options controlling which level of portability to use. ‘touch’ If you specify the desired timestamp (e.g., with the ‘-r’ option), older ‘touch’ implementations use the ‘utime’ or ‘utimes’ system call, which can result in the same kind of timestamp truncation problems that ‘cp -p’ has. On ancient BSD systems, ‘touch’ or any command that results in an empty file does not update the timestamps, so use a command like ‘echo’ as a workaround. 
Also, GNU ‘touch’ 3.16r (and presumably all before that) fails to work on SunOS 4.1.3 when the empty file is on an NFS-mounted 4.2 volume. However, these problems are no longer of practical concern. ‘tr’ Not all versions of ‘tr’ handle all backslash character escapes. For example, Solaris 10 ‘/usr/ucb/tr’ falls over, even though Solaris contains more modern ‘tr’ in other locations. Using octal escapes is more portable for carriage returns, since ‘\015’ is the same for both ASCII and EBCDIC, and since use of literal carriage returns in scripts causes a number of other problems. But for other characters, like newline, using octal escapes ties the operation to ASCII, so it is better to use literal characters. $ { echo moon; echo light; } | /usr/ucb/tr -d '\n' ; echo moo light $ { echo moon; echo light; } | /usr/bin/tr -d '\n' ; echo moonlight $ { echo moon; echo light; } | /usr/ucb/tr -d '\012' ; echo moonlight $ nl=' '; { echo moon; echo light; } | /usr/ucb/tr -d "$nl" ; echo moonlight Not all versions of ‘tr’ recognize direct ranges of characters: at least Solaris ‘/usr/bin/tr’ still fails to do so. But you can use ‘/usr/xpg4/bin/tr’ instead, or add brackets (which in Posix transliterate to themselves). $ echo "Hazy Fantazy" | LC_ALL=C /usr/bin/tr a-z A-Z HAZy FAntAZy $ echo "Hazy Fantazy" | LC_ALL=C /usr/bin/tr '[a-z]' '[A-Z]' HAZY FANTAZY $ echo "Hazy Fantazy" | LC_ALL=C /usr/xpg4/bin/tr a-z A-Z HAZY FANTAZY When providing two arguments, be sure the second string is at least as long as the first. $ echo abc | /usr/xpg4/bin/tr bc d adc $ echo abc | coreutils/tr bc d add Posix requires ‘tr’ to operate on binary files. But at least Solaris ‘/usr/ucb/tr’ and ‘/usr/bin/tr’ silently discard ‘NUL’ in the input prior to doing any translation. When using ‘tr’ to process a binary file that may contain ‘NUL’ bytes, it is necessary to use ‘/usr/xpg4/bin/tr’ instead, or ‘/usr/xpg6/bin/tr’ if that is available. 
$ printf 'a\0b' | /usr/ucb/tr x x | od -An -tx1 61 62 $ printf 'a\0b' | /usr/bin/tr x x | od -An -tx1 61 62 $ printf 'a\0b' | /usr/xpg4/bin/tr x x | od -An -tx1 61 00 62 Solaris ‘/usr/ucb/tr’ additionally fails to handle ‘\0’ as the octal escape for ‘NUL’. $ printf 'abc' | /usr/ucb/tr 'bc' '\0d' | od -An -tx1 61 62 63 $ printf 'abc' | /usr/bin/tr 'bc' '\0d' | od -An -tx1 61 00 64 $ printf 'abc' | /usr/xpg4/bin/tr 'bc' '\0d' | od -An -tx1 61 00 64  File: autoconf.info, Node: Portable Make, Next: Portable C and C++, Prev: Portable Shell, Up: Top 12 Portable Make Programming **************************** Writing portable makefiles is an art. Since a makefile’s commands are executed by the shell, you must consider the shell portability issues already mentioned. However, other issues are specific to ‘make’ itself. * Menu: * $< in Ordinary Make Rules:: $< in ordinary rules * Failure in Make Rules:: Failing portably in rules * Special Chars in Names:: Special Characters in Macro Names * Backslash-Newline-Empty:: Empty lines after backslash-newline * Backslash-Newline Comments:: Spanning comments across line boundaries * Long Lines in Makefiles:: Line length limitations * Macros and Submakes:: ‘make macro=value’ and submakes * The Make Macro MAKEFLAGS:: ‘$(MAKEFLAGS)’ portability issues * The Make Macro SHELL:: ‘$(SHELL)’ portability issues * Parallel Make:: Parallel ‘make’ quirks * Comments in Make Rules:: Other problems with Make comments * Newlines in Make Rules:: Using literal newlines in rules * Comments in Make Macros:: Other problems with Make comments in macros * Trailing whitespace in Make Macros:: Macro substitution problems * Command-line Macros and whitespace:: Whitespace trimming of values * obj/ and Make:: Don’t name a subdirectory ‘obj’ * make -k Status:: Exit status of ‘make -k’ * VPATH and Make:: ‘VPATH’ woes * Single Suffix Rules:: Single suffix rules and separated dependencies * Timestamps and Make:: Sub-second timestamp resolution  File: autoconf.info, 
Node: $< in Ordinary Make Rules, Next: Failure in Make Rules, Up: Portable Make 12.1 ‘$<’ in Ordinary Make Rules ================================ Posix says that the ‘$<’ construct in makefiles can be used only in inference rules and in the ‘.DEFAULT’ rule; its meaning in ordinary rules is unspecified. Solaris ‘make’ for instance replaces it with the empty string. OpenBSD (3.0 and later) ‘make’ diagnoses these uses and errors out.  File: autoconf.info, Node: Failure in Make Rules, Next: Special Chars in Names, Prev: $< in Ordinary Make Rules, Up: Portable Make 12.2 Failure in Make Rules ========================== Posix 2008 requires that ‘make’ must invoke each command with the equivalent of a ‘sh -e -c’ subshell, which causes the subshell to exit immediately if a subsidiary simple-command fails, although not all ‘make’ implementations have historically followed this rule. For example, the command ‘touch T; rm -f U’ may attempt to remove ‘U’ even if the ‘touch’ fails, although this is not permitted with Posix make. One way to work around failures in simple commands is to reword them so that they always succeed, e.g., ‘touch T || :; rm -f U’. However, even this approach can run into common bugs in BSD implementations of the ‘-e’ option of ‘sh’ and ‘set’ (*note Limitations of Shell Builtins: set.), so if you are worried about porting to buggy BSD shells it may be simpler to migrate complicated ‘make’ actions into separate scripts.  File: autoconf.info, Node: Special Chars in Names, Next: Backslash-Newline-Empty, Prev: Failure in Make Rules, Up: Portable Make 12.3 Special Characters in Make Macro Names =========================================== Posix limits macro names to nonempty strings containing only ASCII letters and digits, ‘.’, and ‘_’. Many ‘make’ implementations allow a wider variety of characters, but portable makefiles should avoid them. It is portable to start a name with a special character, e.g., ‘$(.FOO)’. 
Some ancient ‘make’ implementations don’t support leading underscores in macro names. An example is NEWS-OS 4.2R. $ cat Makefile _am_include = # _am_quote = all:; @echo this is test $ make Make: Must be a separator on rules line 2. Stop. $ cat Makefile2 am_include = # am_quote = all:; @echo this is test $ make -f Makefile2 this is test However, this problem is no longer of practical concern.  File: autoconf.info, Node: Backslash-Newline-Empty, Next: Backslash-Newline Comments, Prev: Special Chars in Names, Up: Portable Make 12.4 Backslash-Newline Before Empty Lines ========================================= A bug in Bash 2.03 can cause problems if a Make rule contains a backslash-newline followed by line that expands to nothing. For example, on Solaris 8: SHELL = /bin/bash EMPTY = foo: touch foo \ $(EMPTY) executes /bin/bash -c 'touch foo \ ' which fails with a syntax error, due to the Bash bug. To avoid this problem, avoid nullable macros in the last line of a multi-line command. On some versions of HP-UX, ‘make’ reads multiple newlines following a backslash, continuing to the next non-empty line. For example, FOO = one \ BAR = two test: : FOO is "$(FOO)" : BAR is "$(BAR)" shows ‘FOO’ equal to ‘one BAR = two’. Other implementations sensibly let a backslash continue only to the immediately following line.  File: autoconf.info, Node: Backslash-Newline Comments, Next: Long Lines in Makefiles, Prev: Backslash-Newline-Empty, Up: Portable Make 12.5 Backslash-Newline in Make Comments ======================================= According to Posix, Make comments start with ‘#’ and continue until an unescaped newline is reached. $ cat Makefile # A = foo \ bar \ baz all: @echo ok $ make # GNU make ok However this is not always the case. Some implementations discard everything from ‘#’ through the end of the line, ignoring any trailing backslash. 
$ pmake # BSD make "Makefile", line 3: Need an operator Fatal errors encountered -- cannot continue Therefore, if you want to comment out a multi-line definition, prefix each line with ‘#’, not only the first. # A = foo \ # bar \ # baz  File: autoconf.info, Node: Long Lines in Makefiles, Next: Macros and Submakes, Prev: Backslash-Newline Comments, Up: Portable Make 12.6 Long Lines in Makefiles ============================ Tru64 5.1’s ‘make’ has been reported to crash when given a makefile with lines longer than around 20 kB. Earlier versions are reported to exit with ‘Line too long’ diagnostics.  File: autoconf.info, Node: Macros and Submakes, Next: The Make Macro MAKEFLAGS, Prev: Long Lines in Makefiles, Up: Portable Make 12.7 ‘make macro=value’ and Submakes ==================================== A command-line variable definition such as ‘foo=bar’ overrides any definition of ‘foo’ in a makefile. Some ‘make’ implementations (such as GNU ‘make’) propagate this override to subsidiary invocations of ‘make’. Some other implementations do not pass the substitution along to submakes. $ cat Makefile foo = foo one: @echo $(foo) $(MAKE) two two: @echo $(foo) $ make foo=bar # GNU make 3.79.1 bar make two make[1]: Entering directory `/home/adl' bar make[1]: Leaving directory `/home/adl' $ pmake foo=bar # BSD make bar pmake two foo You have a few possibilities if you do want the ‘foo=bar’ override to propagate to submakes. One is to use the ‘-e’ option, which causes all environment variables to have precedence over the makefile macro definitions, and declare foo as an environment variable: $ env foo=bar make -e The ‘-e’ option is propagated to submakes automatically, and since the environment is inherited between ‘make’ invocations, the ‘foo’ macro is overridden in submakes as expected. This syntax (‘foo=bar make -e’) is portable only when used outside of a makefile, for instance from a script or from the command line. 
When run inside a ‘make’ rule, GNU ‘make’ 3.80 and prior versions forget to propagate the ‘-e’ option to submakes. Moreover, using ‘-e’ could have unexpected side effects if your environment contains some other macros usually defined by the makefile. (See also the note about ‘make -e’ and ‘SHELL’ below.) If you can foresee all macros that a user might want to override, then you can propagate them to submakes manually, from your makefile: foo = foo one: @echo $(foo) $(MAKE) foo=$(foo) two two: @echo $(foo) Another way to propagate a variable to submakes in a portable way is to expand an extra variable in every invocation of ‘$(MAKE)’ within your makefile: foo = foo one: @echo $(foo) $(MAKE) $(SUBMAKEFLAGS) two two: @echo $(foo) Users must be aware that this technique is in use to take advantage of it, e.g. with ‘make foo=bar SUBMAKEFLAGS='foo=bar'’, but it allows any macro to be overridden. Makefiles generated by ‘automake’ use this technique, expanding ‘$(AM_MAKEFLAGS)’ on the command lines of submakes (*note Automake: (automake)Subdirectories.).  File: autoconf.info, Node: The Make Macro MAKEFLAGS, Next: The Make Macro SHELL, Prev: Macros and Submakes, Up: Portable Make 12.8 The Make Macro MAKEFLAGS ============================= Posix requires ‘make’ to use ‘MAKEFLAGS’ to affect the current and recursive invocations of make, but allows implementations several formats for the variable. It is tricky to parse ‘$MAKEFLAGS’ to determine whether ‘-s’ for silent execution or ‘-k’ for continued execution are in effect. For example, you cannot assume that the first space-separated word in ‘$MAKEFLAGS’ contains single-letter options, since in the Cygwin version of GNU ‘make’ it is either ‘--unix’ or ‘--win32’ with the second word containing single-letter options. 
$ cat Makefile all: @echo MAKEFLAGS = $(MAKEFLAGS) $ make MAKEFLAGS = --unix $ make -k MAKEFLAGS = --unix -k  File: autoconf.info, Node: The Make Macro SHELL, Next: Parallel Make, Prev: The Make Macro MAKEFLAGS, Up: Portable Make 12.9 The Make Macro ‘SHELL’ =========================== Posix-compliant ‘make’ internally uses the ‘$(SHELL)’ macro to spawn shell processes and execute Make rules. This is a builtin macro supplied by ‘make’, but it can be modified by a makefile or by a command-line argument. Not all ‘make’ implementations define this ‘SHELL’ macro. Tru64 ‘make’ is an example; this implementation always uses ‘/bin/sh’. So it’s a good idea to always define ‘SHELL’ in your makefiles. If you use Autoconf, do SHELL = @SHELL@ If you use Automake, this is done for you. Do not force ‘SHELL = /bin/sh’ because that is not correct everywhere. Remember, ‘/bin/sh’ is not Posix compliant on many systems, such as FreeBSD 4, NetBSD 3, AIX 3, Solaris 10, or Tru64. Additionally, DJGPP lacks ‘/bin/sh’, and when its GNU ‘make’ port sees such a setting it enters a special emulation mode where features like pipes and redirections are emulated on top of DOS’s ‘command.com’. Unfortunately this emulation is incomplete; for instance it does not handle command substitutions. Using ‘@SHELL@’ means that your makefile will benefit from the same improved shell, such as ‘bash’ or ‘ksh’, that was discovered during ‘configure’, so that you aren’t fighting two different sets of shell bugs between the two contexts. Posix-compliant ‘make’ should never acquire the value of $(SHELL) from the environment, even when ‘make -e’ is used (otherwise, think about what would happen to your rules if ‘SHELL=/bin/tcsh’). However not all ‘make’ implementations have this exception. For instance it’s not surprising that Tru64 ‘make’ doesn’t protect ‘SHELL’, since it doesn’t use it. 
$ cat Makefile SHELL = /bin/sh FOO = foo all: @echo $(SHELL) @echo $(FOO) $ env SHELL=/bin/tcsh FOO=bar make -e # Tru64 Make /bin/tcsh bar $ env SHELL=/bin/tcsh FOO=bar gmake -e # GNU make /bin/sh bar Conversely, ‘make’ is not supposed to export any changes to the macro ‘SHELL’ to child processes. Again, many implementations break this rule: $ cat Makefile all: @echo $(SHELL) @printenv SHELL $ env SHELL=sh make -e SHELL=/bin/ksh # BSD Make, GNU make 3.80 /bin/ksh /bin/ksh $ env SHELL=sh gmake -e SHELL=/bin/ksh # GNU make 3.81 /bin/ksh sh  File: autoconf.info, Node: Parallel Make, Next: Comments in Make Rules, Prev: The Make Macro SHELL, Up: Portable Make 12.10 Parallel Make =================== Support for parallel execution in ‘make’ implementation varies. Generally, using GNU make is your best bet. When NetBSD or FreeBSD ‘make’ are run in parallel mode, they will reuse the same shell for multiple commands within one recipe. This can have various unexpected consequences. For example, changes of directories or variables persist between recipes, so that: all: @var=value; cd /; pwd; echo $$var; echo $$$$ @pwd; echo $$var; echo $$$$ may output the following with ‘make -j1’, at least on NetBSD up to 5.1 and FreeBSD up to 8.2: / value 32235 / value 32235 while without ‘-j1’, or with ‘-B’, the output looks less surprising: / value 32238 /tmp 32239 Another consequence is that, if one command in a recipe uses ‘exit 0’ to indicate a successful exit, the shell will be gone and the remaining commands of this recipe will not be executed. 
The BSD ‘make’ implementations, when run in parallel mode, will also pass the ‘Makefile’ recipes to the shell through its standard input, thus making it unusable from the recipes: $ cat Makefile read: @read line; echo LINE: $$line $ echo foo | make read LINE: foo $ echo foo | make -j1 read # NetBSD 5.1 and FreeBSD 8.2 LINE: Moreover, when FreeBSD ‘make’ (up at least to 8.2) is run in parallel mode, it implements the ‘@’ and ‘-’ “recipe modifiers” by dynamically modifying the active shell flags. This behavior has the effects of potentially clobbering the exit status of recipes silenced with the ‘@’ modifier if they also unset the ‘errexit’ shell flag, and of mangling the output in unexpected ways: $ cat Makefile a: @echo $$-; set +e; false b: -echo $$-; false; echo set - $ make a; echo status: $? ehBc *** Error code 1 status: 1 $ make -j1 a; echo status: $? ehB status: 0 $ make b echo $-; echo set - hBc set - $ make -j1 b echo $-; echo hvB You can avoid all these issues by using the ‘-B’ option to enable compatibility semantics. However, that will effectively also disable all parallelism as that will cause prerequisites to be updated in the order they are listed in a rule. Some make implementations (among them, FreeBSD ‘make’, NetBSD ‘make’, and Solaris ‘dmake’), when invoked with a ‘-jN’ option, connect the standard output and standard error of all their child processes to pipes or temporary regular files. This can lead to subtly different semantics in the behavior of the spawned processes. 
For example, even if the ‘make’ standard output is connected to a tty, the recipe command will not be: $ cat Makefile all: @test -t 1 && echo "Is a tty" || echo "Is not a tty" $ make -j 2 # FreeBSD 8.2 make Is not a tty $ make -j 2 # NetBSD 5.1 make --- all --- Is not a tty $ dmake -j 2 # Solaris 10 dmake HOSTNAME --> 1 job HOSTNAME --> Job output Is not a tty On the other hand: $ make -j 2 # GNU make, Heirloom make Is a tty The above examples also show additional status output produced in parallel mode for targets being updated by Solaris ‘dmake’ and NetBSD ‘make’ (but _not_ by FreeBSD ‘make’). Furthermore, parallel runs of those ‘make’ implementations will route standard error from commands that they spawn into their own standard output, and may remove leading whitespace from output lines.  File: autoconf.info, Node: Comments in Make Rules, Next: Newlines in Make Rules, Prev: Parallel Make, Up: Portable Make 12.11 Comments in Make Rules ============================ Never put comments in a rule. Some ‘make’ treat anything starting with a tab as a command for the current rule, even if the tab is immediately followed by a ‘#’. The ‘make’ from Tru64 Unix V5.1 is one of them. The following makefile runs ‘# foo’ through the shell. all: # foo As a workaround, you can use the ‘:’ no-op command with a string argument that gets ignored: all: : "foo" Conversely, if you want to use the ‘#’ character in some command, you can only do so by expanding it inside a rule (*note Comments in Make Macros::). So for example, if ‘COMMENT_CHAR’ is substituted by ‘config.status’ as ‘#’, then the following substitutes ‘@COMMENT_CHAR@’ in a generated header: foo.h: foo.h.in sed -e 's|@''COMMENT_CHAR''@|@COMMENT_CHAR@|g' \ $(srcdir)/foo.h.in > $@ The funny shell quoting avoids a substitution at ‘config.status’ run time of the left-hand side of the ‘sed’ ‘s’ command.  
File: autoconf.info, Node: Newlines in Make Rules, Next: Comments in Make Macros, Prev: Comments in Make Rules, Up: Portable Make 12.12 Newlines in Make Rules ============================ In shell scripts, newlines can be used inside string literals. But in the shell statements of ‘Makefile’ rules, this is not possible: A newline not preceded by a backslash is a separator between shell statements. Whereas a newline that is preceded by a backslash becomes part of the shell statement according to POSIX, but gets replaced, together with the backslash that precedes it, by a space in GNU ‘make’ 3.80 and older. So, how can a newline be used in a string literal? The trick is to set up a shell variable that contains a newline: nlinit=`echo 'nl="'; echo '"'`; eval "$$nlinit" For example, in order to create a multi-line ‘sed’ expression that inserts a blank line after every line of a file, this code can be used: nlinit=`echo 'nl="'; echo '"'`; eval "$$nlinit"; \ sed -e "s/\$$/\\$${nl}/" < input > output  File: autoconf.info, Node: Comments in Make Macros, Next: Trailing whitespace in Make Macros, Prev: Newlines in Make Rules, Up: Portable Make 12.13 Comments in Make Macros ============================= Avoid putting comments in macro values as far as possible. Posix specifies that the text starting from the ‘#’ sign until the end of the line is to be ignored, which has the unfortunate effect of disallowing them even within quotes. Thus, the following might lead to a syntax error at compile time: CPPFLAGS = "-DCOMMENT_CHAR='#'" as ‘CPPFLAGS’ may be expanded to ‘"-DCOMMENT_CHAR='’. Most ‘make’ implementations disregard this and treat single and double quotes specially here. Also, GNU ‘make’ lets you put ‘#’ into a macro value by escaping it with a backslash, i.e., ‘\#’. However, neither of these usages are portable. *Note Comments in Make Rules::, for a portable alternative. 
Even without quoting involved, comments can have surprising effects, because the whitespace before them is part of the variable value: foo = bar # trailing comment print: ; @echo "$(foo)." prints ‘bar .’, which is usually not intended, and can expose ‘make’ bugs as described below.  File: autoconf.info, Node: Trailing whitespace in Make Macros, Next: Command-line Macros and whitespace, Prev: Comments in Make Macros, Up: Portable Make 12.14 Trailing whitespace in Make Macros ======================================== GNU ‘make’ 3.80 mistreats trailing whitespace in macro substitutions and appends another spurious suffix: empty = foo = bar $(empty) print: ; @echo $(foo:=.test) prints ‘bar.test .test’. BSD and Solaris ‘make’ implementations do not honor trailing whitespace in macro definitions as Posix requires: foo = bar # Note the space after "bar". print: ; @echo $(foo)t prints ‘bart’ instead of ‘bar t’. To work around this, you can use a helper macro as in the previous example.  File: autoconf.info, Node: Command-line Macros and whitespace, Next: obj/ and Make, Prev: Trailing whitespace in Make Macros, Up: Portable Make 12.15 Command-line Macros and whitespace ======================================== Some ‘make’ implementations may strip trailing whitespace off of macros set on the command line in addition to leading whitespace. Further, some may strip leading whitespace off of macros set from environment variables: $ echo 'print: ; @echo "x$(foo)x$(bar)x"' | foo=' f f ' make -f - bar=' b b ' x f f xb b x # AIX, BSD, GNU make xf f xb b x # HP-UX, IRIX, Tru64/OSF make x f f xb bx # Solaris make  File: autoconf.info, Node: obj/ and Make, Next: make -k Status, Prev: Command-line Macros and whitespace, Up: Portable Make 12.16 The ‘obj/’ Subdirectory and Make ====================================== Never name one of your subdirectories ‘obj/’ if you don’t like surprises. If an ‘obj/’ directory exists, BSD ‘make’ enters it before reading the makefile. 
Hence the makefile in the current directory is not read. $ cat Makefile all: echo Hello $ cat obj/Makefile all: echo World $ make # GNU make echo Hello Hello $ pmake # BSD make echo World World  File: autoconf.info, Node: make -k Status, Next: VPATH and Make, Prev: obj/ and Make, Up: Portable Make 12.17 Exit Status of ‘make -k’ ============================== Do not rely on the exit status of ‘make -k’. Some implementations reflect whether they encountered an error in their exit status; other implementations always succeed. $ cat Makefile all: false $ make -k; echo exit status: $? # GNU make false make: *** [all] Error 1 exit status: 2 $ pmake -k; echo exit status: $? # BSD make false *** Error code 1 (continuing) exit status: 0  File: autoconf.info, Node: VPATH and Make, Next: Single Suffix Rules, Prev: make -k Status, Up: Portable Make 12.18 ‘VPATH’ and Make ====================== Posix does not specify the semantics of ‘VPATH’. Typically, ‘make’ supports ‘VPATH’, but its implementation is not consistent. Autoconf and Automake support makefiles whose usages of ‘VPATH’ are portable to recent-enough popular implementations of ‘make’, but to keep the resulting makefiles portable, a package’s makefile prototypes must take the following issues into account. These issues are complicated and are often poorly understood, and installers who use ‘VPATH’ should expect to find many bugs in this area. If you use ‘VPATH’, the simplest way to avoid these portability bugs is to stick with GNU ‘make’, since it is the most commonly-used ‘make’ among Autoconf users. Here are some known issues with some ‘VPATH’ implementations. 
* Menu: * Variables listed in VPATH:: ‘VPATH’ must be literal on ancient hosts * VPATH and Double-colon:: Problems with ‘::’ on ancient hosts * $< in Explicit Rules:: ‘$<’ does not work in ordinary rules * Automatic Rule Rewriting:: ‘VPATH’ goes wild on Solaris * Tru64 Directory Magic:: ‘mkdir’ goes wild on Tru64 * Make Target Lookup:: More details about ‘VPATH’ lookup  File: autoconf.info, Node: Variables listed in VPATH, Next: VPATH and Double-colon, Up: VPATH and Make 12.18.1 Variables listed in ‘VPATH’ ----------------------------------- Do not set ‘VPATH’ to the value of another variable, for example ‘VPATH = $(srcdir)’, because some ancient versions of ‘make’ do not do variable substitutions on the value of ‘VPATH’. For example, use this srcdir = @srcdir@ VPATH = @srcdir@ rather than ‘VPATH = $(srcdir)’. Note that with GNU Automake, there is no need to set this yourself.  File: autoconf.info, Node: VPATH and Double-colon, Next: $< in Explicit Rules, Prev: Variables listed in VPATH, Up: VPATH and Make 12.18.2 ‘VPATH’ and Double-colon Rules -------------------------------------- With ancient versions of Sun ‘make’, any assignment to ‘VPATH’ causes ‘make’ to execute only the first set of double-colon rules. However, this problem is no longer of practical concern.  File: autoconf.info, Node: $< in Explicit Rules, Next: Automatic Rule Rewriting, Prev: VPATH and Double-colon, Up: VPATH and Make 12.18.3 ‘$<’ Not Supported in Explicit Rules -------------------------------------------- Using ‘$<’ in explicit rules is not portable. The prerequisite file must be named explicitly in the rule. If you want to find the prerequisite via a ‘VPATH’ search, you have to code the whole thing manually. *Note Build Directories::.  
File: autoconf.info, Node: Automatic Rule Rewriting, Next: Tru64 Directory Magic, Prev: $< in Explicit Rules, Up: VPATH and Make 12.18.4 Automatic Rule Rewriting -------------------------------- Some ‘make’ implementations, such as Solaris and Tru64, search for prerequisites in ‘VPATH’ and then rewrite each occurrence as a plain word in the rule. For instance: # This isn't portable to GNU make. VPATH = ../pkg/src f.c: if.c cp if.c f.c executes ‘cp ../pkg/src/if.c f.c’ if ‘if.c’ is found in ‘../pkg/src’. However, this rule leads to real problems in practice. For example, if the source directory contains an ordinary file named ‘test’ that is used in a dependency, Solaris ‘make’ rewrites commands like ‘if test -r foo; ...’ to ‘if ../pkg/src/test -r foo; ...’, which is typically undesirable. In fact, ‘make’ is completely unaware of shell syntax used in the rules, so the VPATH rewrite can potentially apply to _any_ whitespace-separated word in a rule, including shell variables, functions, and keywords. $ mkdir build $ cd build $ cat > Makefile <<'END' VPATH = .. all: arg func for echo func () { for arg in "$$@"; do echo $$arg; done; }; \ func "hello world" END $ touch ../arg ../func ../for ../echo $ make ../func () { ../for ../arg in "$@"; do ../echo $arg; done; }; \ ../func "hello world" sh: syntax error at line 1: `do' unexpected *** Error code 2 To avoid this problem, portable makefiles should never mention a source file or dependency whose name is that of a shell keyword like ‘for’ or ‘until’, a shell command like ‘cat’ or ‘gcc’ or ‘test’, or a shell function or variable used in the corresponding ‘Makefile’ recipe. Because of these problems GNU ‘make’ and many other ‘make’ implementations do not rewrite commands, so portable makefiles should search ‘VPATH’ manually. It is tempting to write this: # This isn't portable to Solaris make. VPATH = ../pkg/src f.c: if.c cp `test -f if.c || echo $(VPATH)/`if.c f.c However, the “prerequisite rewriting” still applies here. 
So if ‘if.c’ is in ‘../pkg/src’, Solaris and Tru64 ‘make’ execute cp `test -f ../pkg/src/if.c || echo ../pkg/src/`if.c f.c which reduces to cp if.c f.c and thus fails. Oops. A simple workaround, and good practice anyway, is to use ‘$?’ and ‘$@’ when possible: VPATH = ../pkg/src f.c: if.c cp $? $@ but this does not generalize well to commands with multiple prerequisites. A more general workaround is to rewrite the rule so that the prerequisite ‘if.c’ never appears as a plain word. For example, these three rules would be safe, assuming ‘if.c’ is in ‘../pkg/src’ and the other files are in the working directory: VPATH = ../pkg/src f.c: if.c f1.c cat `test -f ./if.c || echo $(VPATH)/`if.c f1.c >$@ g.c: if.c g1.c cat `test -f 'if.c' || echo $(VPATH)/`if.c g1.c >$@ h.c: if.c h1.c cat `test -f "if.c" || echo $(VPATH)/`if.c h1.c >$@ Things get worse when your prerequisites are in a macro. VPATH = ../pkg/src HEADERS = f.h g.h h.h install-HEADERS: $(HEADERS) for i in $(HEADERS); do \ $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ done The above ‘install-HEADERS’ rule is not Solaris-proof because ‘for i in $(HEADERS);’ is expanded to ‘for i in f.h g.h h.h;’ where ‘f.h’ and ‘g.h’ are plain words and are hence subject to ‘VPATH’ adjustments. If the three files are in ‘../pkg/src’, the rule is run as: for i in ../pkg/src/f.h ../pkg/src/g.h h.h; do \ install -m 644 \ `test -f $i || echo ../pkg/src/`$i \ /usr/local/include/$i; \ done where the two first ‘install’ calls fail. For instance, consider the ‘f.h’ installation: install -m 644 \ `test -f ../pkg/src/f.h || \ echo ../pkg/src/ \ `../pkg/src/f.h \ /usr/local/include/../pkg/src/f.h; It reduces to: install -m 644 \ ../pkg/src/f.h \ /usr/local/include/../pkg/src/f.h; Note that the manual ‘VPATH’ search did not cause any problems here; however this command installs ‘f.h’ in an incorrect directory. 
Trying to quote ‘$(HEADERS)’ in some way, as we did for ‘foo.c’ a few makefiles ago, does not help: install-HEADERS: $(HEADERS) headers='$(HEADERS)'; \ for i in $$headers; do \ $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ done Now, ‘headers='$(HEADERS)'’ macro-expands to: headers='f.h g.h h.h' but ‘g.h’ is still a plain word. (As an aside, the idiom ‘headers='$(HEADERS)'; for i in $$headers;’ is a good idea if ‘$(HEADERS)’ can be empty, because some shells diagnose a syntax error on ‘for i in;’.) One workaround is to strip this unwanted ‘../pkg/src/’ prefix manually: VPATH = ../pkg/src HEADERS = f.h g.h h.h install-HEADERS: $(HEADERS) headers='$(HEADERS)'; \ for i in $$headers; do \ i=`expr "$$i" : '$(VPATH)/\(.*\)'`; $(INSTALL) -m 644 \ `test -f $$i || echo $(VPATH)/`$$i \ $(DESTDIR)$(includedir)/$$i; \ done Automake does something similar. However the above hack works only if the files listed in ‘HEADERS’ are in the current directory or a subdirectory; they should not be in an enclosing directory. If we had ‘HEADERS = ../f.h’, the above fragment would fail in a VPATH build with Tru64 ‘make’. The reason is that not only does Tru64 ‘make’ rewrite dependencies, but it also simplifies them. Hence ‘../f.h’ becomes ‘../pkg/f.h’ instead of ‘../pkg/src/../f.h’. This obviously defeats any attempt to strip a leading ‘../pkg/src/’ component. The following example makes the behavior of Tru64 ‘make’ more apparent. $ cat Makefile VPATH = sub all: ../foo echo ../foo $ ls Makefile foo $ make echo foo foo Dependency ‘../foo’ was found in ‘sub/../foo’, but Tru64 ‘make’ simplified it as ‘foo’. (Note that the ‘sub/’ directory does not even exist, this just means that the simplification occurred before the file was checked for.) For the record here is how SunOS 4 ‘make’ behaves on this example. 
$ make make: Fatal error: Don't know how to make target `../foo' $ mkdir sub $ make echo sub/../foo sub/../foo  File: autoconf.info, Node: Tru64 Directory Magic, Next: Make Target Lookup, Prev: Automatic Rule Rewriting, Up: VPATH and Make 12.18.5 Tru64 ‘make’ Creates Prerequisite Directories Magically --------------------------------------------------------------- When a prerequisite is a subdirectory of ‘VPATH’, Tru64 ‘make’ creates it in the current directory. $ mkdir -p foo/bar build $ cd build $ cat >Makefile <dest-stamp Apart from timestamp resolution, there are also differences in handling equal timestamps. HP-UX ‘make’ updates targets if it has the same timestamp as one of its prerequisites, in violation of Posix rules. This can cause spurious rebuilds for repeated runs of ‘make’. This in turn can cause ‘make’ to fail if it tries to rebuild generated files in a possibly read-only source tree with tools not present on the end-user machine. Use GNU ‘make’ instead.  File: autoconf.info, Node: Portable C and C++, Next: Manual Configuration, Prev: Portable Make, Up: Top 13 Portable C and C++ Programming ********************************* C and C++ programs often use low-level features of the underlying system, and therefore are often more difficult to make portable to other platforms. Several standards have been developed to help make your programs more portable. If you write programs with these standards in mind, you can have greater confidence that your programs work on a wide variety of systems. *Note Language Standards Supported by GCC: (gcc)Standards, for a list of C-related standards. Many programs also assume the Posix standard (https://en.wikipedia.org/wiki/POSIX). Some old code is written to be portable to K&R C, which predates any C standard. K&R C compilers are no longer of practical interest, though, and the rest of section assumes at least C89, the first C standard. 
Program portability is a huge topic, and this section can only briefly introduce common pitfalls. *Note Portability between System Types: (standards)System Portability, for more information. * Menu: * Varieties of Unportability:: How to make your programs unportable * Integer Overflow:: When integers get too large * Preprocessor Arithmetic:: ‘#if’ expression problems * Null Pointers:: Properties of null pointers * Buffer Overruns:: Subscript errors and the like * Volatile Objects:: ‘volatile’ and signals * Floating Point Portability:: Portable floating-point arithmetic * Exiting Portably:: Exiting and the exit status  File: autoconf.info, Node: Varieties of Unportability, Next: Integer Overflow, Up: Portable C and C++ 13.1 Varieties of Unportability =============================== Autoconf tests and ordinary programs often need to test what is allowed on a system, and therefore they may need to deliberately exceed the boundaries of what the standards allow, if only to see whether an optional feature is present. When you write such a program, you should keep in mind the difference between constraints, unspecified behavior, and undefined behavior. In C, a “constraint” is a rule that the compiler must enforce. An example constraint is that C programs must not declare a bit-field with negative width. Tests can therefore reliably assume that programs with negative-width bit-fields are rejected by a compiler that conforms to the standard. “Unspecified behavior” is valid behavior, where the standard allows multiple possibilities. For example, the order of evaluation of function arguments is unspecified. Some unspecified behavior is “implementation-defined”, i.e., documented by the implementation, but since Autoconf tests cannot read the documentation they cannot distinguish between implementation-defined and other unspecified behavior. It is common for Autoconf tests to probe implementations to determine otherwise-unspecified behavior. 
“Undefined behavior” is invalid behavior, where the standard allows the implementation to do anything it pleases. For example, dereferencing a null pointer leads to undefined behavior. If possible, test programs should avoid undefined behavior, since a program with undefined behavior might succeed on a test that should fail. The above rules apply to programs that are intended to conform to the standard. However, strictly-conforming programs are quite rare, since the standards are so limiting. A major goal of Autoconf is to support programs that use implementation features not described by the standard, and it is fairly common for test programs to violate the above rules, if the programs work well enough in practice.  File: autoconf.info, Node: Integer Overflow, Next: Preprocessor Arithmetic, Prev: Varieties of Unportability, Up: Portable C and C++ 13.2 Integer Overflow ===================== In practice many portable C programs assume that signed integer overflow wraps around reliably using two’s complement arithmetic. Yet the C standard says that program behavior is undefined on overflow, and in a few cases C programs do not work on some modern implementations because their overflows do not wrap around as their authors expected. Conversely, in signed integer remainder, the C standard requires overflow behavior that is commonly not implemented. * Menu: * Integer Overflow Basics:: Why integer overflow is a problem * Signed Overflow Examples:: Examples of code assuming wraparound * Optimization and Wraparound:: Optimizations that break uses of wraparound * Signed Overflow Advice:: Practical advice for signed overflow issues * Signed Integer Division:: ‘INT_MIN / -1’ and ‘INT_MIN % -1’  File: autoconf.info, Node: Integer Overflow Basics, Next: Signed Overflow Examples, Up: Integer Overflow 13.2.1 Basics of Integer Overflow --------------------------------- In languages like C, unsigned integer overflow reliably wraps around; e.g., ‘UINT_MAX + 1’ yields zero. 
This is guaranteed by the C standard and is portable in practice, unless you specify aggressive, nonstandard optimization options suitable only for special applications. In contrast, the C standard says that signed integer overflow leads to undefined behavior where a program can do anything, including dumping core or overrunning a buffer. The misbehavior can even precede the overflow. Such an overflow can occur during addition, subtraction, multiplication, division, and left shift. Despite this requirement of the standard, many C programs and Autoconf tests assume that signed integer overflow silently wraps around modulo a power of two, using two’s complement arithmetic, so long as you cast the resulting value to a signed integer type or store it into a signed integer variable. If you use conservative optimization flags, such programs are generally portable to the vast majority of modern platforms, with a few exceptions discussed later. For historical reasons the C standard also allows implementations with ones’ complement or signed magnitude arithmetic, but it is safe to assume two’s complement nowadays. Also, overflow can occur when converting an out-of-range value to a signed integer type. Here a standard implementation must define what happens, but this might include raising an exception. In practice all known implementations support silent wraparound in this case, so you need not worry about other possibilities.  File: autoconf.info, Node: Signed Overflow Examples, Next: Optimization and Wraparound, Prev: Integer Overflow Basics, Up: Integer Overflow 13.2.2 Examples of Code Assuming Wraparound Overflow ---------------------------------------------------- There has long been a tension between what the C standard requires for signed integer overflow, and what C programs commonly assume. The standard allows aggressive optimizations based on assumptions that overflow never occurs, but many practical C programs rely on overflow wrapping around. 
These programs do not conform to the standard, but they commonly work in practice because compiler writers are understandably reluctant to implement optimizations that would break many programs, unless perhaps a user specifies aggressive optimization. The C Standard says that if a program has signed integer overflow its behavior is undefined, and the undefined behavior can even precede the overflow. To take an extreme example: if (password == expected_password) allow_superuser_privileges (); else if (counter++ == INT_MAX) abort (); else printf ("%d password mismatches\n", counter); If the ‘int’ variable ‘counter’ equals ‘INT_MAX’, ‘counter++’ must overflow and the behavior is undefined, so the C standard allows the compiler to optimize away the test against ‘INT_MAX’ and the ‘abort’ call. Worse, if an earlier bug in the program lets the compiler deduce that ‘counter == INT_MAX’ or that ‘counter’ previously overflowed, the C standard allows the compiler to optimize away the password test and generate code that allows superuser privileges unconditionally. Despite this requirement by the standard, it has long been common for C code to assume wraparound arithmetic after signed overflow, and all known practical C implementations support some C idioms that assume wraparound signed arithmetic, even if the idioms do not conform strictly to the standard. If your code looks like the following examples it will almost surely work with real-world compilers. Here is an example derived from the 7th Edition Unix implementation of ‘atoi’ (1979-01-10): char *p; int f, n; ... while (*p >= '0' && *p <= '9') n = n * 10 + *p++ - '0'; return (f ? -n : n); Even if the input string is in range, on most modern machines this has signed overflow when computing the most negative integer (the ‘-n’ overflows) or a value near an extreme integer (the first ‘+’ overflows). Here is another example, derived from the 7th Edition implementation of ‘rand’ (1979-01-10). 
Here the programmer expects both multiplication and addition to wrap on overflow: static long int randx = 1; ... randx = randx * 1103515245 + 12345; return (randx >> 16) & 077777; In the following example, derived from the GNU C Library 2.5 implementation of ‘mktime’ (2006-09-09), the code assumes wraparound arithmetic in ‘+’ to detect signed overflow: time_t t, t1, t2; int sec_requested, sec_adjustment; ... t1 = t + sec_requested; t2 = t1 + sec_adjustment; if (((t1 < t) != (sec_requested < 0)) | ((t2 < t1) != (sec_adjustment < 0))) return -1; If your code looks like these examples, it is probably safe even though it does not strictly conform to the C standard. This might lead one to believe that one can generally assume wraparound on overflow, but that is not always true, as can be seen in the next section.  File: autoconf.info, Node: Optimization and Wraparound, Next: Signed Overflow Advice, Prev: Signed Overflow Examples, Up: Integer Overflow 13.2.3 Optimizations That Break Wraparound Arithmetic ----------------------------------------------------- Compilers sometimes generate code that is incompatible with wraparound integer arithmetic. A simple example is an algebraic simplification: a compiler might translate ‘(i * 2000) / 1000’ to ‘i * 2’ because it assumes that ‘i * 2000’ does not overflow. The translation is not equivalent to the original when overflow occurs: e.g., in the typical case of 32-bit signed two’s complement wraparound ‘int’, if ‘i’ has type ‘int’ and value ‘1073742’, the original expression returns −2147483 but the optimized version returns the mathematically correct value 2147484. More subtly, loop induction optimizations often exploit the undefined behavior of signed overflow. 
Consider the following contrived function ‘sumc’: int sumc (int lo, int hi) { int sum = 0; int i; for (i = lo; i <= hi; i++) sum ^= i * 53; return sum; } To avoid multiplying by 53 each time through the loop, an optimizing compiler might internally transform ‘sumc’ to the equivalent of the following: int transformed_sumc (int lo, int hi) { int sum = 0; int hic = hi * 53; int ic; for (ic = lo * 53; ic <= hic; ic += 53) sum ^= ic; return sum; } This transformation is allowed by the C standard, but it is invalid for wraparound arithmetic when ‘INT_MAX / 53 < hi’, because then the overflow in computing expressions like ‘hi * 53’ can cause the expression ‘i <= hi’ to yield a different value from the transformed expression ‘ic <= hic’. For this reason, compilers that use loop induction and similar techniques often do not support reliable wraparound arithmetic when a loop induction variable like ‘ic’ is involved. Since loop induction variables are generated by the compiler, and are not visible in the source code, it is not always trivial to say whether the problem affects your code. Hardly any code actually depends on wraparound arithmetic in cases like these, so in practice these loop induction optimizations are almost always useful. However, edge cases in this area can cause problems. For example: int j; for (j = 1; 0 < j; j *= 2) test (j); Here, the loop attempts to iterate through all powers of 2 that ‘int’ can represent, but the C standard allows a compiler to optimize away the comparison and generate an infinite loop, under the argument that behavior is undefined on overflow. As of this writing this optimization is not done by any production version of GCC with ‘-O2’, but it might be performed by other compilers, or by more aggressive GCC optimization options, and the GCC developers have not decided whether it will continue to work with GCC and ‘-O2’.  
File: autoconf.info, Node: Signed Overflow Advice, Next: Signed Integer Division, Prev: Optimization and Wraparound, Up: Integer Overflow 13.2.4 Practical Advice for Signed Overflow Issues -------------------------------------------------- Ideally the safest approach is to avoid signed integer overflow entirely. For example, instead of multiplying two signed integers, you can convert them to unsigned integers, multiply the unsigned values, then test whether the result is in signed range. Rewriting code in this way will be inconvenient, though, particularly if the signed values might be negative. Also, it may hurt performance. Using unsigned arithmetic to check for overflow is particularly painful to do portably and efficiently when dealing with an integer type like ‘uid_t’ whose width and signedness vary from platform to platform. Furthermore, many C applications pervasively assume wraparound behavior and typically it is not easy to find and remove all these assumptions. Hence it is often useful to maintain nonstandard code that assumes wraparound on overflow, instead of rewriting the code. The rest of this section attempts to give practical advice for this situation. If your code wants to detect signed integer overflow in ‘sum = a + b’, it is generally safe to use an expression like ‘(sum < a) != (b < 0)’. If your code uses a signed loop index, make sure that the index cannot overflow, along with all signed expressions derived from the index. Here is a contrived example of problematic code with two instances of overflow. for (i = INT_MAX - 10; i <= INT_MAX; i++) if (i + 1 < 0) { report_overflow (); break; } Because of the two overflows, a compiler might optimize away or transform the two comparisons in a way that is incompatible with the wraparound assumption. If your code uses an expression like ‘(i * 2000) / 1000’ and you actually want the multiplication to wrap around on overflow, use unsigned arithmetic to do it, e.g., ‘((int) (i * 2000u)) / 1000’. 
If your code assumes wraparound behavior and you want to insulate it against any GCC optimizations that would fail to support that behavior, you should use GCC’s ‘-fwrapv’ option, which causes signed overflow to wrap around reliably (except for division and remainder, as discussed in the next section). If you need to port to platforms where signed integer overflow does not reliably wrap around (e.g., due to hardware overflow checking, or to highly aggressive optimizations), you should consider debugging with GCC’s ‘-ftrapv’ option, which causes signed overflow to raise an exception.  File: autoconf.info, Node: Signed Integer Division, Prev: Signed Overflow Advice, Up: Integer Overflow 13.2.5 Signed Integer Division and Integer Overflow --------------------------------------------------- Overflow in signed integer division is not always harmless: for example, on CPUs of the i386 family, dividing ‘INT_MIN’ by ‘-1’ yields a SIGFPE signal which by default terminates the program. Worse, taking the remainder of these two values typically yields the same signal on these CPUs, even though the C standard requires ‘INT_MIN % -1’ to yield zero because the expression does not overflow.  File: autoconf.info, Node: Preprocessor Arithmetic, Next: Null Pointers, Prev: Integer Overflow, Up: Portable C and C++ 13.3 Preprocessor Arithmetic ============================ In C99 and later, preprocessor arithmetic, used for ‘#if’ expressions, must be evaluated as if all signed values are of type ‘intmax_t’ and all unsigned values of type ‘uintmax_t’. Many compilers are buggy in this area, though. For example, as of 2007, Sun C mishandles ‘#if LLONG_MIN < 0’ on a platform with 32-bit ‘long int’ and 64-bit ‘long long int’. Also, some older preprocessors mishandle constants ending in ‘LL’. To work around these problems, you can compute the value of expressions like ‘LONG_MAX < LLONG_MAX’ at ‘configure’-time rather than at ‘#if’-time.  
File: autoconf.info, Node: Null Pointers, Next: Buffer Overruns, Prev: Preprocessor Arithmetic, Up: Portable C and C++ 13.4 Properties of Null Pointers ================================ Most modern hosts reliably fail when you attempt to dereference a null pointer. On almost all modern hosts, null pointers use an all-bits-zero internal representation, so you can reliably use ‘memset’ with 0 to set all the pointers in an array to null values. If ‘p’ is a null pointer to an object type, the C expression ‘p + 0’ always evaluates to ‘p’ on modern hosts, even though the standard says that it has undefined behavior.  File: autoconf.info, Node: Buffer Overruns, Next: Volatile Objects, Prev: Null Pointers, Up: Portable C and C++ 13.5 Buffer Overruns and Subscript Errors ========================================= Buffer overruns and subscript errors are the most common dangerous errors in C programs. They result in undefined behavior because storing outside an array typically modifies storage that is used by some other object, and most modern systems lack runtime checks to catch these errors. Programs should not rely on buffer overruns being caught. There is one exception to the usual rule that a portable program cannot address outside an array. In C, it is valid to compute the address just past an object, e.g., ‘&a[N]’ where ‘a’ has ‘N’ elements, so long as you do not dereference the resulting pointer. But it is not valid to compute the address just before an object, e.g., ‘&a[-1]’; nor is it valid to compute two past the end, e.g., ‘&a[N+1]’. On most platforms ‘&a[-1] < &a[0] && &a[N] < &a[N+1]’, but this is not reliable in general, and it is usually easy enough to avoid the potential portability problem, e.g., by allocating an extra unused array element at the start or end. Valgrind (https://www.valgrind.org/) can catch many overruns. GCC users might also consider using the ‘-fsanitize=’ options to catch overruns. 
*Note Program Instrumentation Options: (gcc)Instrumentation Options.
‘volatile’ was designed for code that accesses special objects like memory-mapped device registers whose contents spontaneously change. Such code is inherently low-level, and it is difficult to specify portably what ‘volatile’ means in these cases. The C standard says, “What constitutes an access to an object that has volatile-qualified type is implementation-defined,” so in theory each implementation is supposed to fill in the gap by documenting what ‘volatile’ means for that implementation. In practice, though, this documentation is usually absent or incomplete. One area of confusion is the distinction between objects defined with volatile types, and volatile lvalues. From the C standard’s point of view, an object defined with a volatile type has externally visible behavior. You can think of such objects as having little oscilloscope probes attached to them, so that the user can observe some properties of accesses to them, just as the user can observe data written to output files. However, the standard does not make it clear whether users can observe accesses by volatile lvalues to ordinary objects. For example: /* Declare and access a volatile object. Accesses to X are "visible" to users. */ static int volatile x; x = 1; /* Access two ordinary objects via a volatile lvalue. It's not clear whether accesses to *P are "visible". */ int y; int *z = malloc (sizeof (int)); int volatile *p; p = &y; *p = 1; p = z; *p = 1; Programmers often wish that ‘volatile’ meant “Perform the memory access here and now, without merging several memory accesses, without changing the memory word size, and without reordering.” But the C standard does not require this. For objects defined with a volatile type, accesses must be done before the next sequence point; but otherwise merging, reordering, and word-size change is allowed. 
Worse, it is not clear from the standard whether volatile lvalues provide more guarantees in general than nonvolatile lvalues, if the underlying objects are ordinary. Even when accessing objects defined with a volatile type, the C standard allows only extremely limited signal handlers: in C99 the behavior is undefined if a signal handler reads any non-local object, or writes to any non-local object whose type is not ‘sig_atomic_t volatile’, or calls any standard library function other than ‘abort’, ‘signal’, and ‘_Exit’. Hence C compilers need not worry about a signal handler disturbing ordinary computation. C11 and Posix allow some additional behavior in a portable signal handler, but are still quite restrictive. Some C implementations allow memory-access optimizations within each translation unit, such that actual behavior agrees with the behavior required by the standard only when calling a function in some other translation unit, and a signal handler acts like it was called from a different translation unit. The C99 standard hints that in these implementations, objects referred to by signal handlers “would require explicit specification of ‘volatile’ storage, as well as other implementation-defined restrictions.” But unfortunately even for this special case these other restrictions are often not documented well. This area was significantly changed in C11, and eventually implementations will probably head in the C11 direction, but this will take some time. *Note When is a Volatile Object Accessed?: (gcc)Volatiles, for some restrictions imposed by GCC. *Note Defining Signal Handlers: (libc)Defining Handlers, for some restrictions imposed by the GNU C library. Restrictions differ on other platforms. If possible, it is best to use a signal handler that fits within the limits imposed by the C and Posix standards. If this is not practical, you can try the following rules of thumb. 
A signal handler should access only volatile lvalues, preferably lvalues that refer to objects defined with a volatile type, and should not assume that the accessed objects have an internally consistent state if they are larger than a machine word. Furthermore, installers should employ compilers and compiler options that are commonly used for building operating system kernels, because kernels often need more from ‘volatile’ than the C Standard requires, and installers who compile an application in a similar environment can sometimes benefit from the extra constraints imposed by kernels on compilers. Admittedly we are hand-waving somewhat here, as there are few guarantees in this area; the rules of thumb may help to fix some bugs but there is a good chance that they will not fix them all. For ‘volatile’, C++ has the same problems that C does. Multithreaded applications have even more problems with ‘volatile’, but they are beyond the scope of this section. The bottom line is that using ‘volatile’ typically hurts performance but should not hurt correctness. In some cases its use does help correctness, but these cases are often so poorly understood that all too often adding ‘volatile’ to a data structure merely alleviates some symptoms of a bug while not fixing the bug in general.  File: autoconf.info, Node: Floating Point Portability, Next: Exiting Portably, Prev: Volatile Objects, Up: Portable C and C++ 13.7 Floating Point Portability =============================== Almost all modern systems use IEEE-754 floating point, and it is safe to assume IEEE-754 in most portable code these days. For more information, please see David Goldberg’s classic paper What Every Computer Scientist Should Know About Floating-Point Arithmetic (http://www.validlab.com/goldberg/paper.pdf).  
File: autoconf.info, Node: Exiting Portably, Prev: Floating Point Portability, Up: Portable C and C++ 13.8 Exiting Portably ===================== A C or C++ program can exit with status N by returning N from the ‘main’ function. Portable programs are supposed to exit either with status 0 or ‘EXIT_SUCCESS’ to succeed, or with status ‘EXIT_FAILURE’ to fail, but in practice it is portable to fail by exiting with status 1, and test programs that assume Posix can fail by exiting with status values from 1 through 255. Programs on SunOS 2.0 (1985) through 3.5.2 (1988) incorrectly exited with zero status when ‘main’ returned nonzero, but ancient systems like these are no longer of practical concern. A program can also exit with status N by passing N to the ‘exit’ function, and a program can fail by calling the ‘abort’ function. If a program is specialized to just some platforms, it can fail by calling functions specific to those platforms, e.g., ‘_exit’ (Posix). However, like other functions, an exit function should be declared, typically by including a header. For example, if a C program calls ‘exit’, it should include ‘stdlib.h’ either directly or via the default includes (*note Default Includes::). A program can fail due to undefined behavior such as dereferencing a null pointer, but this is not recommended as undefined behavior allows an implementation to do whatever it pleases and this includes exiting successfully.  File: autoconf.info, Node: Manual Configuration, Next: Site Configuration, Prev: Portable C and C++, Up: Top 14 Manual Configuration *********************** A few kinds of features can’t be guessed automatically by running test programs. For example, the details of the object-file format, or special options that need to be passed to the compiler or linker. Autoconf provides a uniform method for handling unguessable features, by giving each operating system a “canonical system type”, also known as a “canonical name” or “target triplet”. 
If you use any of the macros described in this chapter, you must distribute the helper scripts ‘config.guess’ and ‘config.sub’ along with your source code. Some Autoconf macros use these macros internally, so you may need to distribute these scripts even if you do not use any of these macros yourself. *Note Input::, for information about the ‘AC_CONFIG_AUX_DIR’ macro which you can use to control in which directory ‘configure’ looks for helper scripts, and where to get the scripts from. * Menu: * Specifying Target Triplets:: Specifying target triplets * Canonicalizing:: Getting the canonical system type * Using System Type:: What to do with the system type  File: autoconf.info, Node: Specifying Target Triplets, Next: Canonicalizing, Up: Manual Configuration 14.1 Specifying target triplets =============================== Autoconf-generated ‘configure’ scripts can make decisions based on a canonical name for the system type, or “target triplet”, which has the form: ‘CPU-VENDOR-OS’, where OS can be ‘SYSTEM’ or ‘KERNEL-SYSTEM’ ‘configure’ can usually guess the canonical name for the type of system it’s running on. To do so it runs a script called ‘config.guess’, which infers the name using the ‘uname’ command or symbols predefined by the C preprocessor. Alternately, the user can specify the system type with command line arguments to ‘configure’ (*note System Type::. Doing so is necessary when cross-compiling. In the most complex case of cross-compiling, three system types are involved. The options to specify them are: ‘--build=BUILD-TYPE’ the type of system on which the package is being configured and compiled. It defaults to the result of running ‘config.guess’. Specifying a BUILD-TYPE that differs from HOST-TYPE enables cross-compilation mode. ‘--host=HOST-TYPE’ the type of system on which the package runs. By default it is the same as the build machine. 
The tools that get used to build and manipulate binaries will, by default, all be prefixed with ‘HOST-TYPE-’, such as ‘HOST-TYPE-gcc’, ‘HOST-TYPE-g++’, ‘HOST-TYPE-ar’, and ‘HOST-TYPE-nm’. If the binaries produced by these tools can be executed by the build system, the configure script will make use of it in ‘AC_RUN_IFELSE’ invocations; otherwise, cross-compilation mode is enabled. Specifying a HOST-TYPE that differs from BUILD-TYPE, when BUILD-TYPE was also explicitly specified, equally enables cross-compilation mode. ‘--target=TARGET-TYPE’ the type of system for which any compiler tools in the package produce code (rarely needed). By default, it is the same as host. If you mean to override the result of ‘config.guess’ but still produce binaries for the build machine, use ‘--build’, not ‘--host’. So, for example, to produce binaries for 64-bit MinGW, use a command like this: ./configure --host=x86_64-w64-mingw64 If your system has the ability to execute MinGW binaries but you don’t want to make use of this feature and instead prefer cross-compilation guesses, use a command like this: ./configure --build=x86_64-pc-linux-gnu --host=x86_64-w64-mingw64 Note that if you do not specify ‘--host’, ‘configure’ fails if it can’t run the code generated by the specified compiler. For example, configuring as follows fails: ./configure CC=x86_64-w64-mingw64-gcc When cross-compiling, ‘configure’ will warn about any tools (compilers, linkers, assemblers) whose name is not prefixed with the host type. This is an aid to users performing cross-compilation. Continuing the example above, if a cross-compiler named ‘cc’ is used with a native ‘pkg-config’, then libraries found by ‘pkg-config’ will likely cause subtle build failures; but using the names ‘x86_64-w64-mingw64-gcc’ and ‘x86_64-w64-mingw64-pkg-config’ avoids any confusion. Avoiding the warning is as simple as creating the correct symlinks naming the cross tools. 
‘configure’ recognizes short aliases for many system types; for example, ‘decstation’ can be used instead of ‘mips-dec-ultrix4.2’. ‘configure’ runs a script called ‘config.sub’ to canonicalize system type aliases. This section deliberately omits the description of the obsolete interface; see *note Hosts and Cross-Compilation::.  File: autoconf.info, Node: Canonicalizing, Next: Using System Type, Prev: Specifying Target Triplets, Up: Manual Configuration 14.2 Getting the Canonical System Type ====================================== The following macros make the system type available to ‘configure’ scripts. The variables ‘build_alias’, ‘host_alias’, and ‘target_alias’ are always exactly the arguments of ‘--build’, ‘--host’, and ‘--target’; in particular, they are left empty if the user did not use them, even if the corresponding ‘AC_CANONICAL’ macro was run. Any configure script may use these variables anywhere. These are the variables that should be used when in interaction with the user. If you need to recognize some special environments based on their system type, run the following macros to get canonical system names. These variables are not set before the macro call. -- Macro: AC_CANONICAL_BUILD Compute the canonical build-system type variable, ‘build’, and its three individual parts ‘build_cpu’, ‘build_vendor’, and ‘build_os’. If ‘--build’ was specified, then ‘build’ is the canonicalization of ‘build_alias’ by ‘config.sub’, otherwise it is determined by the shell script ‘config.guess’. -- Macro: AC_CANONICAL_HOST Compute the canonical host-system type variable, ‘host’, and its three individual parts ‘host_cpu’, ‘host_vendor’, and ‘host_os’. If ‘--host’ was specified, then ‘host’ is the canonicalization of ‘host_alias’ by ‘config.sub’, otherwise it defaults to ‘build’. -- Macro: AC_CANONICAL_TARGET Compute the canonical target-system type variable, ‘target’, and its three individual parts ‘target_cpu’, ‘target_vendor’, and ‘target_os’. 
If ‘--target’ was specified, then ‘target’ is the canonicalization of ‘target_alias’ by ‘config.sub’, otherwise it defaults to ‘host’. Note that there can be artifacts due to the backward compatibility code. *Note Hosts and Cross-Compilation::, for more.  File: autoconf.info, Node: Using System Type, Prev: Canonicalizing, Up: Manual Configuration 14.3 Using the System Type ========================== In ‘configure.ac’ the system type is generally used by one or more ‘case’ statements to select system-specifics. Shell wildcards can be used to match a group of system types. For example, an extra assembler code object file could be chosen, giving access to a CPU cycle counter register. ‘$(CYCLE_OBJ)’ in the following would be used in a makefile to add the object to a program or library. AS_CASE([$host], [alpha*-*-*], [CYCLE_OBJ=rpcc.o], [i?86-*-*], [CYCLE_OBJ=rdtsc.o], [CYCLE_OBJ=""] ) AC_SUBST([CYCLE_OBJ]) ‘AC_CONFIG_LINKS’ (*note Configuration Links::) is another good way to select variant source files, for example optimized code for some CPUs. The configured CPU type doesn’t always indicate exact CPU types, so some runtime capability checks may be necessary too. case $host in alpha*-*-*) AC_CONFIG_LINKS([dither.c:alpha/dither.c]) ;; powerpc*-*-*) AC_CONFIG_LINKS([dither.c:powerpc/dither.c]) ;; *-*-*) AC_CONFIG_LINKS([dither.c:generic/dither.c]) ;; esac The host system type can also be used to find cross-compilation tools with ‘AC_CHECK_TOOL’ (*note Generic Programs::). The above examples all show ‘$host’, since this is where the code is going to run. Only rarely is it necessary to test ‘$build’ (which is where the build is being done). Whenever you’re tempted to use ‘$host’ it’s worth considering whether some sort of probe would be better. New system types come along periodically or previously missing features are added. Well-written probes can adapt themselves to such things, but hard-coded lists of names can’t. 
Here are some guidelines, • Availability of libraries and library functions should always be checked by probing. • Variant behavior of system calls is best identified with runtime tests if possible, but bug workarounds or obscure difficulties might have to be driven from ‘$host’. • Assembler code is inevitably highly CPU-specific and is best selected according to ‘$host_cpu’. • Assembler variations like underscore prefix on globals or ELF versus COFF type directives are however best determined by probing, perhaps even examining the compiler output. ‘$target’ is for use by a package creating a compiler or similar. For ordinary packages it’s meaningless and should not be used. It indicates what the created compiler should generate code for, if it can cross-compile. ‘$target’ generally selects various hard-coded CPU and system conventions, since usually the compiler or tools under construction themselves determine how the target works.  File: autoconf.info, Node: Site Configuration, Next: Running configure Scripts, Prev: Manual Configuration, Up: Top 15 Site Configuration ********************* ‘configure’ scripts support several kinds of local configuration decisions. There are ways for users to specify where external software packages are, include or exclude optional features, install programs under modified names, and set default values for ‘configure’ options. 
* Menu: * Help Formatting:: Customizing ‘configure --help’ * External Software:: Working with other optional software * Package Options:: Selecting optional features * Pretty Help Strings:: Formatting help string * Option Checking:: Controlling checking of ‘configure’ options * Site Details:: Configuring site details * Transforming Names:: Changing program names when installing * Site Defaults:: Giving ‘configure’ local defaults  File: autoconf.info, Node: Help Formatting, Next: External Software, Up: Site Configuration 15.1 Controlling Help Output ============================ Users consult ‘configure --help’ to learn of configuration decisions specific to your package. By default, ‘configure’ breaks this output into sections for each type of option; within each section, help strings appear in the order ‘configure.ac’ defines them: Optional Features: ... --enable-bar include bar Optional Packages: ... --with-foo use foo -- Macro: AC_PRESERVE_HELP_ORDER Request an alternate ‘--help’ format, in which options of all types appear together, in the order defined. Call this macro before any ‘AC_ARG_ENABLE’ or ‘AC_ARG_WITH’. Optional Features and Packages: ... --enable-bar include bar --with-foo use foo  File: autoconf.info, Node: External Software, Next: Package Options, Prev: Help Formatting, Up: Site Configuration 15.2 Working With External Software =================================== Some packages require, or can optionally use, other software packages that are already installed. The user can give ‘configure’ command line options to specify which such external software to use. The options have one of these forms: --with-PACKAGE[=ARG] --without-PACKAGE For example, ‘--with-gnu-ld’ means work with the GNU linker instead of some other linker. ‘--with-x’ means work with The X Window System. The user can give an argument by following the package name with ‘=’ and the argument. 
Giving an argument of ‘no’ is for packages that are used by default; it says to _not_ use the package. An argument that is neither ‘yes’ nor ‘no’ could include a name or number of a version of the other package, to specify more precisely which other package this program is supposed to work with. If no argument is given, it defaults to ‘yes’. ‘--without-PACKAGE’ is equivalent to ‘--with-PACKAGE=no’. Normally ‘configure’ scripts complain about ‘--with-PACKAGE’ options that they do not support. *Note Option Checking::, for details, and for how to override the defaults. For each external software package that may be used, ‘configure.ac’ should call ‘AC_ARG_WITH’ to detect whether the ‘configure’ user asked to use it. Whether each package is used or not by default, and which arguments are valid, is up to you. -- Macro: AC_ARG_WITH (PACKAGE, HELP-STRING, [ACTION-IF-GIVEN], [ACTION-IF-NOT-GIVEN]) If the user gave ‘configure’ the option ‘--with-PACKAGE’ or ‘--without-PACKAGE’, run shell commands ACTION-IF-GIVEN. If neither option was given, run shell commands ACTION-IF-NOT-GIVEN. The name PACKAGE indicates another software package that this program should work with. It should consist only of alphanumeric characters, dashes, plus signs, and dots. The option’s argument is available to the shell commands ACTION-IF-GIVEN in the shell variable ‘withval’, which is actually just the value of the shell variable named ‘with_PACKAGE’, with any non-alphanumeric characters in PACKAGE changed into ‘_’. You may use that variable instead, if you wish. Note that ACTION-IF-NOT-GIVEN is not expanded until the point that ‘AC_ARG_WITH’ was expanded. If you need the value of ‘with_PACKAGE’ set to a default value by the time argument parsing is completed, use ‘m4_divert_text’ to the ‘DEFAULTS’ diversion (*note m4_divert_text::) (if done as an argument to ‘AC_ARG_WITH’, also provide non-diverted text to avoid a shell syntax error). 
The argument HELP-STRING is a description of the option that looks like this: --with-readline support fancy command line editing HELP-STRING may be more than one line long, if more detail is needed. Just make sure the columns line up in ‘configure --help’. Avoid tabs in the help string. The easiest way to provide the proper leading whitespace is to format your HELP-STRING with the macro ‘AS_HELP_STRING’ (*note Pretty Help Strings::). The following example shows how to use the ‘AC_ARG_WITH’ macro in a common situation. You want to let the user decide whether to enable support for an external library (e.g., the readline library); if the user specified neither ‘--with-readline’ nor ‘--without-readline’, you want to enable support for readline only if the library is available on the system. AC_ARG_WITH([readline], [AS_HELP_STRING([--with-readline], [support fancy command line editing @<:@default=check@:>@])], [], [: m4_divert_text([DEFAULTS], [with_readline=check])]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [if test "x$with_readline" != xcheck; then AC_MSG_FAILURE( [--with-readline was given, but test for readline failed]) fi ], -lncurses)]) The next example shows how to use ‘AC_ARG_WITH’ to give the user the possibility to enable support for the readline library, in case it is still experimental and not well tested, and is therefore disabled by default. 
AC_ARG_WITH([readline], [AS_HELP_STRING([--with-readline], [enable experimental support for readline])], [], [with_readline=no]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [AC_MSG_FAILURE( [--with-readline was given, but test for readline failed])], [-lncurses])]) The last example shows how to use ‘AC_ARG_WITH’ to give the user the possibility to disable support for the readline library, given that it is an important feature and that it should be enabled by default. AC_ARG_WITH([readline], [AS_HELP_STRING([--without-readline], [disable support for readline])], [], [with_readline=yes]) LIBREADLINE= AS_IF([test "x$with_readline" != xno], [AC_CHECK_LIB([readline], [main], [AC_SUBST([LIBREADLINE], ["-lreadline -lncurses"]) AC_DEFINE([HAVE_LIBREADLINE], [1], [Define if you have libreadline]) ], [AC_MSG_FAILURE( [readline test failed (--without-readline to disable)])], [-lncurses])]) These three examples can be easily adapted to the case where ‘AC_ARG_ENABLE’ should be preferred to ‘AC_ARG_WITH’ (see *note Package Options::).  File: autoconf.info, Node: Package Options, Next: Pretty Help Strings, Prev: External Software, Up: Site Configuration 15.3 Choosing Package Options ============================= If a software package has optional compile-time features, the user can give ‘configure’ command line options to specify whether to compile them. The options have one of these forms: --enable-FEATURE[=ARG] --disable-FEATURE These options allow users to choose which optional features to build and install. ‘--enable-FEATURE’ options should never make a feature behave differently or cause one feature to replace another. They should only cause parts of the program to be built rather than left out. The user can give an argument by following the feature name with ‘=’ and the argument. 
Giving an argument of ‘no’ requests that the feature _not_ be made available. A feature with an argument looks like ‘--enable-debug=stabs’. If no argument is given, it defaults to ‘yes’. ‘--disable-FEATURE’ is equivalent to ‘--enable-FEATURE=no’. Normally ‘configure’ scripts complain about ‘--enable-PACKAGE’ options that they do not support. *Note Option Checking::, for details, and for how to override the defaults. For each optional feature, ‘configure.ac’ should call ‘AC_ARG_ENABLE’ to detect whether the ‘configure’ user asked to include it. Whether each feature is included or not by default, and which arguments are valid, is up to you. -- Macro: AC_ARG_ENABLE (FEATURE, HELP-STRING, [ACTION-IF-GIVEN], [ACTION-IF-NOT-GIVEN]) If the user gave ‘configure’ the option ‘--enable-FEATURE’ or ‘--disable-FEATURE’, run shell commands ACTION-IF-GIVEN. If neither option was given, run shell commands ACTION-IF-NOT-GIVEN. The name FEATURE indicates an optional user-level facility. It should consist only of alphanumeric characters, dashes, plus signs, and dots. The option’s argument is available to the shell commands ACTION-IF-GIVEN in the shell variable ‘enableval’, which is actually just the value of the shell variable named ‘enable_FEATURE’, with any non-alphanumeric characters in FEATURE changed into ‘_’. You may use that variable instead, if you wish. The HELP-STRING argument is like that of ‘AC_ARG_WITH’ (*note External Software::). Note that ACTION-IF-NOT-GIVEN is not expanded until the point that ‘AC_ARG_ENABLE’ was expanded. If you need the value of ‘enable_FEATURE’ set to a default value by the time argument parsing is completed, use ‘m4_divert_text’ to the ‘DEFAULTS’ diversion (*note m4_divert_text::) (if done as an argument to ‘AC_ARG_ENABLE’, also provide non-diverted text to avoid a shell syntax error). You should format your HELP-STRING with the macro ‘AS_HELP_STRING’ (*note Pretty Help Strings::). 
See the examples suggested with the definition of ‘AC_ARG_WITH’ (*note External Software::) to get an idea of possible applications of ‘AC_ARG_ENABLE’.  File: autoconf.info, Node: Pretty Help Strings, Next: Option Checking, Prev: Package Options, Up: Site Configuration 15.4 Making Your Help Strings Look Pretty ========================================= Properly formatting the ‘help strings’ which are used in ‘AC_ARG_WITH’ (*note External Software::) and ‘AC_ARG_ENABLE’ (*note Package Options::) can be challenging. Specifically, you want your own ‘help strings’ to line up in the appropriate columns of ‘configure --help’ just like the standard Autoconf ‘help strings’ do. This is the purpose of the ‘AS_HELP_STRING’ macro. -- Macro: AS_HELP_STRING (LEFT-HAND-SIDE, RIGHT-HAND-SIDE [INDENT-COLUMN = ‘26’], [WRAP-COLUMN = ‘79’]) Expands into a help string that looks pretty when the user executes ‘configure --help’. It is typically used in ‘AC_ARG_WITH’ (*note External Software::) or ‘AC_ARG_ENABLE’ (*note Package Options::). The following example makes this clearer. AC_ARG_WITH([foo], [AS_HELP_STRING([--with-foo], [use foo (default is no)])], [use_foo=$withval], [use_foo=no]) Then the last few lines of ‘configure --help’ appear like this: --enable and --with options recognized: --with-foo use foo (default is no) Macro expansion is performed on the first argument. However, the second argument of ‘AS_HELP_STRING’ is treated as a whitespace separated list of text to be reformatted, and is not subject to macro expansion. Since it is not expanded, it should not be double quoted. *Note Autoconf Language::, for a more detailed explanation. The ‘AS_HELP_STRING’ macro is particularly helpful when the LEFT-HAND-SIDE and/or RIGHT-HAND-SIDE are composed of macro arguments, as shown in the following example. Be aware that LEFT-HAND-SIDE may not expand to unbalanced quotes, although quadrigraphs can be used. 
AC_DEFUN([MY_ARG_WITH], [AC_ARG_WITH(m4_translit([[$1]], [_], [-]), [AS_HELP_STRING([--with-m4_translit([$1], [_], [-])], [use $1 (default is $2)])], [use_[]$1=$withval], [use_[]$1=$2])]) MY_ARG_WITH([a_b], [no]) Here, the last few lines of ‘configure --help’ will include: --enable and --with options recognized: --with-a-b use a_b (default is no) The parameters INDENT-COLUMN and WRAP-COLUMN were introduced in Autoconf 2.62. Generally, they should not be specified; they exist for fine-tuning of the wrapping. AS_HELP_STRING([--option], [description of option]) ⇒ --option description of option AS_HELP_STRING([--option], [description of option], [15], [30]) ⇒ --option description of ⇒ option  File: autoconf.info, Node: Option Checking, Next: Site Details, Prev: Pretty Help Strings, Up: Site Configuration 15.5 Controlling Checking of ‘configure’ Options ================================================ The ‘configure’ script checks its command-line options against a list of known options, like ‘--help’ or ‘--config-cache’. An unknown option ordinarily indicates a mistake by the user and ‘configure’ halts with an error. However, by default unknown ‘--with-PACKAGE’ and ‘--enable-FEATURE’ options elicit only a warning, to support configuring entire source trees. Source trees often contain multiple packages with a top-level ‘configure’ script that uses the ‘AC_CONFIG_SUBDIRS’ macro (*note Subdirectories::). Because the packages generally support different ‘--with-PACKAGE’ and ‘--enable-FEATURE’ options, the GNU Coding Standards say they must accept unrecognized options without halting. Even a warning message is undesirable here, so ‘AC_CONFIG_SUBDIRS’ automatically disables the warnings. This default behavior may be modified in two ways. First, the installer can invoke ‘configure --disable-option-checking’ to disable these warnings, or invoke ‘configure --enable-option-checking=fatal’ to turn them into fatal errors. 
Second, the maintainer can use ‘AC_DISABLE_OPTION_CHECKING’. -- Macro: AC_DISABLE_OPTION_CHECKING By default, disable warnings related to any unrecognized ‘--with-PACKAGE’ or ‘--enable-FEATURE’ options. This is implied by ‘AC_CONFIG_SUBDIRS’. The installer can override this behavior by passing ‘--enable-option-checking’ (enable warnings) or ‘--enable-option-checking=fatal’ (enable errors) to ‘configure’.  File: autoconf.info, Node: Site Details, Next: Transforming Names, Prev: Option Checking, Up: Site Configuration 15.6 Configuring Site Details ============================= Some software packages require complex site-specific information. Some examples are host names to use for certain services, company names, and email addresses to contact. Since some configuration scripts generated by Metaconfig ask for such information interactively, people sometimes wonder how to get that information in Autoconf-generated configuration scripts, which aren’t interactive. Such site configuration information should be put in a file that is edited _only by users_, not by programs. The location of the file can either be based on the ‘prefix’ variable, or be a standard location such as the user’s home directory. It could even be specified by an environment variable. The programs should examine that file at runtime, rather than at compile time. Runtime configuration is more convenient for users and makes the configuration process simpler than getting the information while configuring. *Note Variables for Installation Directories: (standards)Directory Variables, for more information on where to put data files.  File: autoconf.info, Node: Transforming Names, Next: Site Defaults, Prev: Site Details, Up: Site Configuration 15.7 Transforming Program Names When Installing =============================================== Autoconf supports changing the names of programs when installing them. In order to use these transformations, ‘configure.ac’ must call the macro ‘AC_ARG_PROGRAM’. 
-- Macro: AC_ARG_PROGRAM Place in output variable ‘program_transform_name’ a sequence of ‘sed’ commands for changing the names of installed programs. If any of the options described below are given to ‘configure’, program names are transformed accordingly. Otherwise, if ‘AC_CANONICAL_TARGET’ has been called and a ‘--target’ value is given, the target type followed by a dash is used as a prefix. Otherwise, no program name transformation is done. * Menu: * Transformation Options:: ‘configure’ options to transform names * Transformation Examples:: Sample uses of transforming names * Transformation Rules:: Makefile uses of transforming names  File: autoconf.info, Node: Transformation Options, Next: Transformation Examples, Up: Transforming Names 15.7.1 Transformation Options ----------------------------- You can specify name transformations by giving ‘configure’ these command line options: ‘--program-prefix=PREFIX’ prepend PREFIX to the names; ‘--program-suffix=SUFFIX’ append SUFFIX to the names; ‘--program-transform-name=EXPRESSION’ perform ‘sed’ substitution EXPRESSION on the names.  File: autoconf.info, Node: Transformation Examples, Next: Transformation Rules, Prev: Transformation Options, Up: Transforming Names 15.7.2 Transformation Examples ------------------------------ These transformations are useful with programs that can be part of a cross-compilation development environment. For example, a cross-assembler running on x86-64 configured with ‘--target=aarch64-linux-gnu’ is normally installed as ‘aarch64-linux-gnu-as’, rather than ‘as’, which could be confused with a native x86-64 assembler. You can force a program name to begin with ‘g’, if you don’t want GNU programs installed on your system to shadow other programs with the same name. For example, if you configure GNU ‘diff’ with ‘--program-prefix=g’, then when you run ‘make install’ it is installed as ‘/usr/local/bin/gdiff’. 
As a more sophisticated example, you could use --program-transform-name='s/^/g/; s/^gg/g/; s/^gless/less/' to prepend ‘g’ to most of the program names in a source tree, excepting those like ‘gdb’ that already have one and those like ‘less’ and ‘lesskey’ that aren’t GNU programs. (That is assuming that you have a source tree containing those programs that is set up to use this feature.) One way to install multiple versions of some programs simultaneously is to append a version number to the name of one or both. For example, if you want to keep Autoconf version 1 around for a while, you can configure Autoconf version 2 using ‘--program-suffix=2’ to install the programs as ‘/usr/local/bin/autoconf2’, ‘/usr/local/bin/autoheader2’, etc. Note, however, that only the program binaries are renamed, so you could still have problems with library files whose names might overlap.  File: autoconf.info, Node: Transformation Rules, Prev: Transformation Examples, Up: Transforming Names 15.7.3 Transformation Rules --------------------------- Here is how to use the variable ‘program_transform_name’ in a ‘Makefile.in’: PROGRAMS = cp ls rm transform = @program_transform_name@ install: for p in $(PROGRAMS); do \ $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/`echo $$p | \ sed '$(transform)'`; \ done uninstall: for p in $(PROGRAMS); do \ rm -f $(DESTDIR)$(bindir)/`echo $$p | sed '$(transform)'`; \ done It is guaranteed that ‘program_transform_name’ is never empty, and that there are no useless separators. Therefore you may safely embed ‘program_transform_name’ within a sed program using ‘;’: transform = @program_transform_name@ transform_exe = s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/ Whether to do the transformations on documentation files (Texinfo or ‘man’) is a tricky question; there seems to be no perfect answer, due to the several reasons for name transforming. 
Documentation is not usually particular to a specific architecture, and Texinfo files do not conflict with system documentation. But they might conflict with earlier versions of the same files, and ‘man’ pages sometimes do conflict with system documentation. As a compromise, it is probably best to do name transformations on ‘man’ pages but not on Texinfo manuals.  File: autoconf.info, Node: Site Defaults, Prev: Transforming Names, Up: Site Configuration 15.8 Setting Site Defaults ========================== Autoconf-generated ‘configure’ scripts allow your site to provide default values for some configuration values. You do this by creating site- and system-wide initialization files. If the environment variable ‘CONFIG_SITE’ is set, ‘configure’ uses its value as a space-separated list of shell scripts to read; it is recommended that these be absolute file names. Otherwise, it reads the shell script ‘PREFIX/share/config.site’ if it exists, then ‘PREFIX/etc/config.site’ if it exists. Thus, settings in machine-specific files override those in machine-independent ones in case of conflict. Site files can be arbitrary shell scripts, but only certain kinds of code are really appropriate to be in them. Because ‘configure’ reads any cache file after it has read any site files, a site file can define a default cache file to be shared between all Autoconf-generated ‘configure’ scripts run on that system (*note Cache Files::). If you set a default cache file in a site file, it is a good idea to also set the output variable ‘CC’ in that site file, because the cache file is only valid for a particular compiler, but many systems have several available. You can examine or override the value set by a command line option to ‘configure’ in a site file; options set shell variables that have the same names as the options, with any dashes turned into underscores. 
The exceptions are that ‘--without-’ and ‘--disable-’ options are like giving the corresponding ‘--with-’ or ‘--enable-’ option and the value ‘no’. Thus, ‘--cache-file=localcache’ sets the variable ‘cache_file’ to the value ‘localcache’; ‘--enable-warnings=no’ or ‘--disable-warnings’ sets the variable ‘enable_warnings’ to the value ‘no’; ‘--prefix=/usr’ sets the variable ‘prefix’ to the value ‘/usr’; etc. Site files are also good places to set default values for other output variables, such as ‘CFLAGS’, if you need to give them non-default values: anything you would normally do, repetitively, on the command line. If you use non-default values for PREFIX or EXEC_PREFIX (wherever you locate the site file), you can set them in the site file if you specify it with the ‘CONFIG_SITE’ environment variable. You can set some cache values in the site file itself. Doing this is useful if you are cross-compiling, where it is impossible to check features that require running a test program. You could “prime the cache” by setting those values correctly for that system in ‘PREFIX/etc/config.site’. To find out the names of the cache variables you need to set, see the documentation of the respective Autoconf macro. If the variables or their semantics are undocumented, you may need to look for shell variables with ‘_cv_’ in their names in the affected ‘configure’ scripts, or in the Autoconf M4 source code for those macros; but in that case, their name or semantics may change in a future Autoconf version. The cache file is careful to not override any variables set in the site files. Similarly, you should not override command-line options in the site files. Your code should check that variables such as ‘prefix’ and ‘cache_file’ have their default values (as set near the top of ‘configure’) before changing them. Here is a sample file ‘/usr/share/local/gnu/share/config.site’. 
The command ‘configure --prefix=/usr/share/local/gnu’ would read this file (if ‘CONFIG_SITE’ is not set to a different file). # /usr/share/local/gnu/share/config.site for configure # # Change some defaults. test "$prefix" = NONE && prefix=/usr/share/local/gnu test "$exec_prefix" = NONE && exec_prefix=/usr/local/gnu test "$sharedstatedir" = '${prefix}/com' && sharedstatedir=/var test "$localstatedir" = '${prefix}/var' && localstatedir=/var test "$runstatedir" = '${localstatedir}/run' && runstatedir=/run # Give Autoconf 2.x generated configure scripts a shared default # cache file for feature test results, architecture-specific. if test "$cache_file" = /dev/null; then cache_file="$prefix/var/config.cache" # A cache file is only valid for one C compiler. CC=gcc fi Another use of ‘config.site’ is for priming the directory variables in a manner consistent with the Filesystem Hierarchy Standard (FHS). Once the following file is installed at ‘/usr/share/config.site’, a user can execute simply ‘./configure --prefix=/usr’ to get all the directories chosen in the locations recommended by FHS. # /usr/share/config.site for FHS defaults when installing below /usr, # and the respective settings were not changed on the command line. if test "$prefix" = /usr; then test "$sysconfdir" = '${prefix}/etc' && sysconfdir=/etc test "$sharedstatedir" = '${prefix}/com' && sharedstatedir=/var test "$localstatedir" = '${prefix}/var' && localstatedir=/var fi Likewise, on platforms where 64-bit libraries are built by default, then installed in ‘/usr/local/lib64’ instead of ‘/usr/local/lib’, it is appropriate to install ‘/usr/local/share/config.site’: # /usr/local/share/config.site for platforms that prefer # the directory /usr/local/lib64 over /usr/local/lib. 
test "$libdir" = '${exec_prefix}/lib' && libdir='${exec_prefix}/lib64'  File: autoconf.info, Node: Running configure Scripts, Next: config.status Invocation, Prev: Site Configuration, Up: Top 16 Running ‘configure’ Scripts ****************************** Below are instructions on how to configure a package that uses a ‘configure’ script, suitable for inclusion as an ‘INSTALL’ file in the package. A plain-text version of ‘INSTALL’ which you may use comes with Autoconf. * Menu: * Basic Installation:: Instructions for typical cases * Compilers and Options:: Selecting compilers and optimization * Multiple Architectures:: Compiling for multiple architectures at once * Installation Names:: Installing in different directories * Optional Features:: Selecting optional features * Particular Systems:: Particular systems * System Type:: Specifying the system type * Sharing Defaults:: Setting site-wide defaults for ‘configure’ * Defining Variables:: Specifying the compiler etc. * configure Invocation:: Changing how ‘configure’ runs  File: autoconf.info, Node: Basic Installation, Next: Compilers and Options, Up: Running configure Scripts 16.1 Basic Installation ======================= Briefly, the shell command ‘./configure && make && make install’ should configure, build, and install this package. The following more-detailed instructions are generic; see the ‘README’ file for instructions specific to this package. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions. The ‘configure’ shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a ‘Makefile’ in each directory of the package. It may also create one or more ‘.h’ files containing system-dependent definitions. 
Finally, it creates a shell script ‘config.status’ that you can run in the future to recreate the current configuration, and a file ‘config.log’ containing compiler output (useful mainly for debugging ‘configure’). It can also use an optional file (typically called ‘config.cache’ and enabled with ‘--cache-file=config.cache’ or simply ‘-C’) that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files. If you need to do unusual things to compile the package, please try to figure out how ‘configure’ could check whether to do them, and mail diffs or instructions to the address given in the ‘README’ so they can be considered for the next release. If you are using the cache, and at some point ‘config.cache’ contains results you don’t want to keep, you may remove or edit it. The file ‘configure.ac’ (or ‘configure.in’) is used to create ‘configure’ by a program called ‘autoconf’. You need ‘configure.ac’ if you want to change it or regenerate ‘configure’ using a newer version of ‘autoconf’. The simplest way to compile this package is: 1. ‘cd’ to the directory containing the package’s source code and type ‘./configure’ to configure the package for your system. Running ‘configure’ might take a while. While running, it prints some messages telling which features it is checking for. 2. Type ‘make’ to compile the package. 3. Optionally, type ‘make check’ to run any self-tests that come with the package, generally using the just-built uninstalled binaries. 4. Type ‘make install’ to install the programs and any data files and documentation. When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the ‘make install’ phase executed with root privileges. 5. Optionally, type ‘make installcheck’ to repeat any self-tests, but this time using the binaries in their final installed location. 
This target does not install anything. Running this target as a regular user, particularly if the prior ‘make install’ required root privileges, verifies that the installation completed correctly. 6. You can remove the program binaries and object files from the source code directory by typing ‘make clean’. To also remove the files that ‘configure’ created (so you can compile the package for a different kind of computer), type ‘make distclean’. There is also a ‘make maintainer-clean’ target, but that is intended mainly for the package’s developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. 7. Often, you can also type ‘make uninstall’ to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. 8. Some packages, particularly those that use Automake, provide ‘make distcheck’, which can be used by developers to test that all other targets like ‘make install’ and ‘make uninstall’ work correctly. This target is generally not run by end users.  File: autoconf.info, Node: Compilers and Options, Next: Multiple Architectures, Prev: Basic Installation, Up: Running configure Scripts 16.2 Compilers and Options ========================== Some systems require unusual options for compilation or linking that the ‘configure’ script does not know about. Run ‘./configure --help’ for details on some of the pertinent environment variables. You can give ‘configure’ initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: ./configure CC=c99 CFLAGS=-g LIBS=-lposix *Note Defining Variables::, for more details.  
File: autoconf.info, Node: Multiple Architectures, Next: Installation Names, Prev: Compilers and Options, Up: Running configure Scripts 16.3 Compiling For Multiple Architectures ========================================= You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you can use GNU ‘make’. ‘cd’ to the directory where you want the object files and executables to go and run the ‘configure’ script. ‘configure’ automatically checks for the source code in the directory that ‘configure’ is in and in ‘..’. This is known as a “VPATH” build. With a non-GNU ‘make’, it is safer to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use ‘make distclean’ before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types—known as “fat” or “universal” binaries—by specifying multiple ‘-arch’ options to the compiler but only a single ‘-arch’ option to the preprocessor. Like this: ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CPP="gcc -E" CXXCPP="g++ -E" This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results using the ‘lipo’ tool if you have problems.  File: autoconf.info, Node: Installation Names, Next: Optional Features, Prev: Multiple Architectures, Up: Running configure Scripts 16.4 Installation Names ======================= By default, ‘make install’ installs the package’s commands under ‘/usr/local/bin’, include files under ‘/usr/local/include’, etc. You can specify an installation prefix other than ‘/usr/local’ by giving ‘configure’ the option ‘--prefix=PREFIX’, where PREFIX must be an absolute file name. 
You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you pass the option ‘--exec-prefix=PREFIX’ to ‘configure’, the package uses PREFIX as the prefix for installing programs and libraries. Documentation and other data files still use the regular prefix. In addition, if you use an unusual directory layout you can give options like ‘--bindir=DIR’ to specify different values for particular kinds of files. Run ‘configure --help’ for a list of the directories you can set and what kinds of files go in them. In general, the default for these options is expressed in terms of ‘${prefix}’, so that specifying just ‘--prefix’ will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the correct locations to ‘configure’; however, many packages provide one or both of the following shortcuts of passing variable assignments to the ‘make install’ command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each affected directory. For example, ‘make install prefix=/alternate/directory’ will choose an alternate location for all directory configuration variables that were expressed in terms of ‘${prefix}’. Any directories that were specified during ‘configure’, but not in terms of ‘${prefix}’, must each be overridden at install time for the entire installation to be relocated. The approach of makefile variable overrides for each directory variable is required by the GNU Coding Standards, and ideally causes no recompilation. However, some platforms have known limitations with the semantics of shared libraries that end up requiring recompilation when using this method, particularly noticeable in packages that use GNU Libtool. The second method involves providing the ‘DESTDIR’ variable. 
For example, ‘make install DESTDIR=/alternate/directory’ will prepend ‘/alternate/directory’ before all installation names. The approach of ‘DESTDIR’ overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. On the other hand, it does better at avoiding recompilation issues, and works well even when some directory options were not specified in terms of ‘${prefix}’ at ‘configure’ time.  File: autoconf.info, Node: Optional Features, Next: Particular Systems, Prev: Installation Names, Up: Running configure Scripts 16.5 Optional Features ====================== If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving ‘configure’ the option ‘--program-prefix=PREFIX’ or ‘--program-suffix=SUFFIX’. Some packages pay attention to ‘--enable-FEATURE’ options to ‘configure’, where FEATURE indicates an optional part of the package. They may also pay attention to ‘--with-PACKAGE’ options, where PACKAGE is something like ‘gnu-as’ or ‘x’ (for the X Window System). The ‘README’ should mention any ‘--enable-’ and ‘--with-’ options that the package recognizes. For packages that use the X Window System, ‘configure’ can usually find the X include and library files automatically, but if it doesn’t, you can use the ‘configure’ options ‘--x-includes=DIR’ and ‘--x-libraries=DIR’ to specify their locations. Some packages offer the ability to configure how verbose the execution of ‘make’ will be. For these packages, running ‘./configure --enable-silent-rules’ sets the default to minimal output, which can be overridden with ‘make V=1’; while running ‘./configure --disable-silent-rules’ sets the default to verbose, which can be overridden with ‘make V=0’.  
On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot parse its ‘<wchar.h>’ header file.
If you want to _use_ a cross compiler that generates code for a platform different from the build platform, you should specify the “host” platform (i.e., that on which the generated programs will eventually be run) with ‘--host=TYPE’.
For example: ./configure CC=/usr/local2/bin/gcc causes the specified ‘gcc’ to be used as the C compiler (unless it is overridden in the site shell script). Unfortunately, this technique does not work for ‘CONFIG_SHELL’ due to an Autoconf limitation. Until the limitation is lifted, you can use this workaround: CONFIG_SHELL=/bin/bash ./configure CONFIG_SHELL=/bin/bash  File: autoconf.info, Node: configure Invocation, Prev: Defining Variables, Up: Running configure Scripts 16.10 ‘configure’ Invocation ============================ ‘configure’ recognizes the following options to control how it operates. ‘--help’ ‘-h’ Print a summary of all of the options to ‘configure’, and exit. ‘--help=short’ ‘--help=recursive’ Print a summary of the options unique to this package’s ‘configure’, and exit. The ‘short’ variant lists options used only in the top level, while the ‘recursive’ variant lists options also present in any nested packages. ‘--version’ ‘-V’ Print the version of Autoconf used to generate the ‘configure’ script, and exit. ‘--cache-file=FILE’ Enable the cache: use and save the results of the tests in FILE, traditionally ‘config.cache’. FILE defaults to ‘/dev/null’ to disable caching. ‘--config-cache’ ‘-C’ Alias for ‘--cache-file=config.cache’. ‘--quiet’ ‘--silent’ ‘-q’ Do not print messages saying which checks are being made. To suppress all normal output, redirect it to ‘/dev/null’ (any error messages will still be shown). ‘--srcdir=DIR’ Look for the package’s source code in directory DIR. Usually ‘configure’ can determine that directory automatically. ‘--prefix=DIR’ Use DIR as the installation prefix. *note Installation Names:: for more details, including other options available for fine-tuning the installation locations. ‘--no-create’ ‘-n’ Run the configure checks, but stop before creating any output files. ‘configure’ also accepts some other, not widely useful, options. Run ‘configure --help’ for more details.  
Print the configuration settings in a reusable way, quoted for the shell, and exit.
If a TEMPLATE file name is relative, it is first looked for in the build tree, and then in the source tree. *Note Configuration Actions::, for more details. This option and the following ones provide one way for separately distributed packages to share the values computed by ‘configure’. Doing so can be useful if some of the packages need a superset of the features that one of them, perhaps a common library, does. These options allow a ‘config.status’ file to create files other than the ones that its ‘configure.ac’ specifies, so it can be used for a different package, or for extracting a subset of values. For example, echo '@CC@' | ./config.status --file=- provides the value of ‘@CC@’ on standard output. ‘--header=FILE[:TEMPLATE]’ Same as ‘--file’ above, but with ‘AC_CONFIG_HEADERS’. ‘--recheck’ Ask ‘config.status’ to update itself and exit (no instantiation). This option is useful if you change ‘configure’, so that the results of some tests might be different from the previous run. The ‘--recheck’ option reruns ‘configure’ with the same arguments you used before, plus the ‘--no-create’ option, which prevents ‘configure’ from running ‘config.status’ and creating ‘Makefile’ and other files, and the ‘--no-recursion’ option, which prevents ‘configure’ from running other ‘configure’ scripts in subdirectories. (This is so other Make rules can run ‘config.status’ when it changes; *note Automatic Remaking::, for an example). ‘config.status’ checks several optional environment variables that can alter its behavior: -- Variable: CONFIG_SHELL The shell with which to run ‘configure’. It must be Bourne-compatible, and the absolute name of the shell should be passed. The default is a shell that supports ‘LINENO’ if available, and ‘/bin/sh’ otherwise. -- Variable: CONFIG_STATUS The file name to use for the shell script that records the configuration. The default is ‘./config.status’. 
This variable is useful when one package uses parts of another and the ‘configure’ scripts shouldn’t be merged because they are maintained separately. You can use ‘./config.status’ in your makefiles. For example, in the dependencies given above (*note Automatic Remaking::), ‘config.status’ is run twice when ‘configure.ac’ has changed. If that bothers you, you can make each run only regenerate the files for that rule: config.h: stamp-h stamp-h: config.h.in config.status ./config.status config.h echo > stamp-h Makefile: Makefile.in config.status ./config.status Makefile The calling convention of ‘config.status’ has changed; see *note Obsolete config.status Use::, for details.  File: autoconf.info, Node: Obsolete Constructs, Next: Using Autotest, Prev: config.status Invocation, Up: Top 18 Obsolete Constructs ********************** Autoconf changes, and throughout the years some constructs have been obsoleted. Most of the changes involve the macros, but in some cases the tools themselves, or even some concepts, are now considered obsolete. You may completely skip this chapter if you are new to Autoconf. Its intention is mainly to help maintainers updating their packages by understanding how to move to more modern constructs. * Menu: * Obsolete config.status Use:: Obsolete convention for ‘config.status’ * acconfig Header:: Additional entries in ‘config.h.in’ * autoupdate Invocation:: Automatic update of ‘configure.ac’ * Obsolete Macros:: Backward compatibility macros * Autoconf 1:: Tips for upgrading your files * Autoconf 2.13:: Some fresher tips  File: autoconf.info, Node: Obsolete config.status Use, Next: acconfig Header, Up: Obsolete Constructs 18.1 Obsolete ‘config.status’ Invocation ======================================== ‘config.status’ now supports arguments to specify the files to instantiate; see *note config.status Invocation::, for more details. Before, environment variables had to be used. -- Variable: CONFIG_COMMANDS The tags of the commands to execute. 
The default is the arguments given to ‘AC_OUTPUT’ and ‘AC_CONFIG_COMMANDS’ in ‘configure.ac’. -- Variable: CONFIG_FILES The files in which to perform ‘@VARIABLE@’ substitutions. The default is the arguments given to ‘AC_OUTPUT’ and ‘AC_CONFIG_FILES’ in ‘configure.ac’. -- Variable: CONFIG_HEADERS The files in which to substitute C ‘#define’ statements. The default is the arguments given to ‘AC_CONFIG_HEADERS’; if that macro was not called, ‘config.status’ ignores this variable. -- Variable: CONFIG_LINKS The symbolic links to establish. The default is the arguments given to ‘AC_CONFIG_LINKS’; if that macro was not called, ‘config.status’ ignores this variable. In *note config.status Invocation::, using this old interface, the example would be: config.h: stamp-h stamp-h: config.h.in config.status CONFIG_COMMANDS= CONFIG_LINKS= CONFIG_FILES= \ CONFIG_HEADERS=config.h ./config.status echo > stamp-h Makefile: Makefile.in config.status CONFIG_COMMANDS= CONFIG_LINKS= CONFIG_HEADERS= \ CONFIG_FILES=Makefile ./config.status (If ‘configure.ac’ does not call ‘AC_CONFIG_HEADERS’, there is no need to set ‘CONFIG_HEADERS’ in the ‘make’ rules. Equally for ‘CONFIG_COMMANDS’, etc.)  File: autoconf.info, Node: acconfig Header, Next: autoupdate Invocation, Prev: Obsolete config.status Use, Up: Obsolete Constructs 18.2 ‘acconfig.h’ ================= In order to produce ‘config.h.in’, ‘autoheader’ needs to build or to find templates for each symbol. Modern releases of Autoconf use ‘AH_VERBATIM’ and ‘AH_TEMPLATE’ (*note Autoheader Macros::), but in older releases a file, ‘acconfig.h’, contained the list of needed templates. ‘autoheader’ copied comments and ‘#define’ and ‘#undef’ statements from ‘acconfig.h’ in the current directory, if present. This file used to be mandatory if you ‘AC_DEFINE’ any additional symbols. Modern releases of Autoconf also provide ‘AH_TOP’ and ‘AH_BOTTOM’ if you need to prepend/append some information to ‘config.h.in’. 
Ancient versions of Autoconf had a similar feature: if ‘./acconfig.h’ contains the string ‘@TOP@’, ‘autoheader’ copies the lines before the line containing ‘@TOP@’ into the top of the file that it generates. Similarly, if ‘./acconfig.h’ contains the string ‘@BOTTOM@’, ‘autoheader’ copies the lines after that line to the end of the file it generates. Either or both of those strings may be omitted. An even older alternate way to produce the same effect in ancient versions of Autoconf is to create the files ‘FILE.top’ (typically ‘config.h.top’) and/or ‘FILE.bot’ in the current directory. If they exist, ‘autoheader’ copies them to the beginning and end, respectively, of its output. In former versions of Autoconf, the files used in preparing a software package for distribution were: configure.ac --. .------> autoconf* -----> configure +---+ [aclocal.m4] --+ `---. [acsite.m4] ---' | +--> [autoheader*] -> [config.h.in] [acconfig.h] ----. | +-----' [config.h.top] --+ [config.h.bot] --' Using only the ‘AH_’ macros, ‘configure.ac’ should be self-contained, and should not depend upon ‘acconfig.h’ etc.  File: autoconf.info, Node: autoupdate Invocation, Next: Obsolete Macros, Prev: acconfig Header, Up: Obsolete Constructs 18.3 Using ‘autoupdate’ to Modernize ‘configure.ac’ =================================================== The ‘autoupdate’ program updates a ‘configure.ac’ file that calls Autoconf macros by their old names to use the current macro names. In version 2 of Autoconf, most of the macros were renamed to use a more uniform and descriptive naming scheme. *Note Macro Names::, for a description of the new scheme. Although the old names still work (*note Obsolete Macros::, for a list of the old macros and the corresponding new names), you can make your ‘configure.ac’ files more readable and make it easier to use the current Autoconf documentation if you update them to use the new macro names. 
If given no arguments, ‘autoupdate’ updates ‘configure.ac’, backing up the original version with the suffix ‘~’ (or the value of the environment variable ‘SIMPLE_BACKUP_SUFFIX’, if that is set). If you give ‘autoupdate’ an argument, it reads that file instead of ‘configure.ac’ and writes the updated file to the standard output. ‘autoupdate’ accepts the following options: ‘--help’ ‘-h’ Print a summary of the command line options and exit. ‘--version’ ‘-V’ Print the version number of Autoconf and exit. ‘--verbose’ ‘-v’ Report processing steps. ‘--debug’ ‘-d’ Don’t remove the temporary files. ‘--force’ ‘-f’ Force the update even if the file has not changed. Disregard the cache. ‘--include=DIR’ ‘-I DIR’ Also look for input files in DIR. Multiple invocations accumulate. Directories are browsed from last to first. ‘--prepend-include=DIR’ ‘-B DIR’ Prepend directory DIR to the search path. This is used to include the language-specific files before any third-party macros.  File: autoconf.info, Node: Obsolete Macros, Next: Autoconf 1, Prev: autoupdate Invocation, Up: Obsolete Constructs 18.4 Obsolete Macros ==================== Several macros are obsoleted in Autoconf, for various reasons (typically they failed to quote properly, couldn’t be extended for more recent issues, etc.). They are still supported, but deprecated: their use should be avoided. During the jump from Autoconf version 1 to version 2, most of the macros were renamed to use a more uniform and descriptive naming scheme, but their signature did not change. *Note Macro Names::, for a description of the new naming scheme. Below, if there is just the mapping from old names to new names for these macros, the reader is invited to refer to the definition of the new macro for the signature and the description. -- Macro: AC_AIX This macro is a platform-specific subset of ‘AC_USE_SYSTEM_EXTENSIONS’ (*note AC_USE_SYSTEM_EXTENSIONS::). -- Macro: AC_ALLOCA Replaced by ‘AC_FUNC_ALLOCA’ (*note AC_FUNC_ALLOCA::). 
This macro is equivalent to: AC_CHECK_TYPE([TYPE], [], [AC_DEFINE_UNQUOTED([TYPE], [DEFAULT], [Define to `DEFAULT' if <sys/types.h> does not define.])])
Same as: AC_CHECK_DECLS([sys_siglist], [], [], [#include <signal.h> /* NetBSD declares sys_siglist in unistd.h. */ #ifdef HAVE_UNISTD_H # include <unistd.h> #endif ]) *Note AC_CHECK_DECLS::.
Formerly, this macro checked whether ‘setvbuf’ takes the buffering type as its second argument and the buffer pointer as the third, instead of the other way around, and defined ‘SETVBUF_REVERSED’. However, the last systems to have the problem were those based on SVR2, which became obsolete in 1987, and the macro is no longer needed. -- Macro: AC_FUNC_WAIT3 If ‘wait3’ is found and fills in the contents of its third argument (a ‘struct rusage *’), which HP-UX does not do, define ‘HAVE_WAIT3’. These days portable programs should use ‘waitpid’, not ‘wait3’, as ‘wait3’ has been removed from Posix. -- Macro: AC_GCC_TRADITIONAL Replaced by ‘AC_PROG_GCC_TRADITIONAL’ (*note AC_PROG_GCC_TRADITIONAL::). -- Macro: AC_GETGROUPS_T Replaced by ‘AC_TYPE_GETGROUPS’ (*note AC_TYPE_GETGROUPS::). -- Macro: AC_GETLOADAVG Replaced by ‘AC_FUNC_GETLOADAVG’ (*note AC_FUNC_GETLOADAVG::). -- Macro: AC_GNU_SOURCE This macro is a platform-specific subset of ‘AC_USE_SYSTEM_EXTENSIONS’ (*note AC_USE_SYSTEM_EXTENSIONS::). -- Macro: AC_HAVE_FUNCS Replaced by ‘AC_CHECK_FUNCS’ (*note AC_CHECK_FUNCS::). -- Macro: AC_HAVE_HEADERS Replaced by ‘AC_CHECK_HEADERS’ (*note AC_CHECK_HEADERS::). -- Macro: AC_HAVE_LIBRARY (LIBRARY, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND], [OTHER-LIBRARIES]) This macro is equivalent to calling ‘AC_CHECK_LIB’ with a FUNCTION argument of ‘main’. In addition, LIBRARY can be written as any of ‘foo’, ‘-lfoo’, or ‘libfoo.a’. In all of those cases, the compiler is passed ‘-lfoo’. However, LIBRARY cannot be a shell variable; it must be a literal name. *Note AC_CHECK_LIB::. -- Macro: AC_HAVE_POUNDBANG Replaced by ‘AC_SYS_INTERPRETER’ (*note AC_SYS_INTERPRETER::). -- Macro: AC_HEADER_CHECK Replaced by ‘AC_CHECK_HEADER’ (*note AC_CHECK_HEADER::). -- Macro: AC_HEADER_EGREP Replaced by ‘AC_EGREP_HEADER’ (*note AC_EGREP_HEADER::). 
-- Macro: AC_HEADER_TIME This macro used to check whether it was possible to include ‘time.h’ and ‘sys/time.h’ in the same source file, defining ‘TIME_WITH_SYS_TIME’ if so. Nowadays, it is equivalent to ‘AC_CHECK_HEADERS([sys/time.h])’, although it does still define ‘TIME_WITH_SYS_TIME’ for compatibility’s sake. ‘time.h’ is universally present, and the systems on which ‘sys/time.h’ conflicted with ‘time.h’ are obsolete. -- Macro: AC_HELP_STRING Replaced by ‘AS_HELP_STRING’ (*note AS_HELP_STRING::). -- Macro: AC_INIT (UNIQUE-FILE-IN-SOURCE-DIR) Formerly ‘AC_INIT’ used to have a single argument, and was equivalent to: AC_INIT AC_CONFIG_SRCDIR(UNIQUE-FILE-IN-SOURCE-DIR) See *note AC_INIT:: and *note AC_CONFIG_SRCDIR::. -- Macro: AC_INLINE Replaced by ‘AC_C_INLINE’ (*note AC_C_INLINE::). -- Macro: AC_INT_16_BITS If the C type ‘int’ is 16 bits wide, define ‘INT_16_BITS’. Use ‘AC_CHECK_SIZEOF(int)’ instead (*note AC_CHECK_SIZEOF::). -- Macro: AC_IRIX_SUN If on IRIX (Silicon Graphics Unix), add ‘-lsun’ to output ‘LIBS’. If you were using it to get ‘getmntent’, use ‘AC_FUNC_GETMNTENT’ instead. If you used it for the NIS versions of the password and group functions, use ‘AC_CHECK_LIB(sun, getpwnam)’. Up to Autoconf 2.13, it used to be AC_CHECK_LIB([sun], [getmntent], [LIBS="-lsun $LIBS"]) now it is defined as AC_FUNC_GETMNTENT AC_CHECK_LIB([sun], [getpwnam]) See *note AC_FUNC_GETMNTENT:: and *note AC_CHECK_LIB::. -- Macro: AC_ISC_POSIX This macro adds ‘-lcposix’ to output variable ‘LIBS’ if necessary for Posix facilities. Sun dropped support for the obsolete INTERACTIVE Systems Corporation Unix on 2006-07-23. New programs need not use this macro. It is implemented as ‘AC_SEARCH_LIBS([strerror], [cposix])’ (*note AC_SEARCH_LIBS::). -- Macro: AC_LANG_C Same as ‘AC_LANG([C])’ (*note AC_LANG::). -- Macro: AC_LANG_CPLUSPLUS Same as ‘AC_LANG([C++])’ (*note AC_LANG::). -- Macro: AC_LANG_FORTRAN77 Same as ‘AC_LANG([Fortran 77])’ (*note AC_LANG::). 
This is an obsolete version of ‘AC_CONFIG_LINKS’ (*note AC_CONFIG_LINKS::). An updated version of:
-- Macro: AC_MINIX This macro is a platform-specific subset of ‘AC_USE_SYSTEM_EXTENSIONS’ (*note AC_USE_SYSTEM_EXTENSIONS::). -- Macro: AC_MINUS_C_MINUS_O Replaced by ‘AC_PROG_CC_C_O’ (*note AC_PROG_CC_C_O::). -- Macro: AC_MMAP Replaced by ‘AC_FUNC_MMAP’ (*note AC_FUNC_MMAP::). -- Macro: AC_MODE_T Replaced by ‘AC_TYPE_MODE_T’ (*note AC_TYPE_MODE_T::). -- Macro: AC_OBJEXT Defined the output variable ‘OBJEXT’ based on the output of the compiler, after .c files have been excluded. Typically set to ‘o’ if Posix, ‘obj’ if a DOS variant. Now the compiler checking macros handle this automatically. -- Macro: AC_OBSOLETE (THIS-MACRO-NAME, [SUGGESTION]) Make M4 print a message to the standard error output warning that THIS-MACRO-NAME is obsolete, and giving the file and line number where it was called. THIS-MACRO-NAME should be the name of the macro that is calling ‘AC_OBSOLETE’. If SUGGESTION is given, it is printed at the end of the warning message; for example, it can be a suggestion for what to use instead of THIS-MACRO-NAME. For instance AC_OBSOLETE([$0], [; use AC_CHECK_HEADERS(unistd.h) instead])dnl You are encouraged to use ‘AU_DEFUN’ instead, since it gives better services to the user (*note AU_DEFUN::). -- Macro: AC_OFF_T Replaced by ‘AC_TYPE_OFF_T’ (*note AC_TYPE_OFF_T::). -- Macro: AC_OUTPUT ([FILE]..., [EXTRA-CMDS], [INIT-CMDS]) The use of ‘AC_OUTPUT’ with arguments is deprecated. This obsoleted interface is equivalent to: AC_CONFIG_FILES(FILE...) AC_CONFIG_COMMANDS([default], EXTRA-CMDS, INIT-CMDS) AC_OUTPUT See *note AC_CONFIG_FILES::, *note AC_CONFIG_COMMANDS::, and *note AC_OUTPUT::. -- Macro: AC_OUTPUT_COMMANDS (EXTRA-CMDS, [INIT-CMDS]) Specify additional shell commands to run at the end of ‘config.status’, and shell commands to initialize any variables from ‘configure’. This macro may be called multiple times. It is obsolete, replaced by ‘AC_CONFIG_COMMANDS’ (*note AC_CONFIG_COMMANDS::). 
Here is an unrealistic example: fubar=27 AC_OUTPUT_COMMANDS([echo this is extra $fubar, and so on.], [fubar=$fubar]) AC_OUTPUT_COMMANDS([echo this is another, extra, bit], [echo init bit]) Aside from the fact that ‘AC_CONFIG_COMMANDS’ requires an additional key, an important difference is that ‘AC_OUTPUT_COMMANDS’ is quoting its arguments twice, unlike ‘AC_CONFIG_COMMANDS’. This means that ‘AC_CONFIG_COMMANDS’ can safely be given macro calls as arguments: AC_CONFIG_COMMANDS(foo, [my_FOO()]) Conversely, where one level of quoting was enough for literal strings with ‘AC_OUTPUT_COMMANDS’, you need two with ‘AC_CONFIG_COMMANDS’. The following lines are equivalent: AC_OUTPUT_COMMANDS([echo "Square brackets: []"]) AC_CONFIG_COMMANDS([default], [[echo "Square brackets: []"]]) -- Macro: AC_PID_T Replaced by ‘AC_TYPE_PID_T’ (*note AC_TYPE_PID_T::). -- Macro: AC_PREFIX Replaced by ‘AC_PREFIX_PROGRAM’ (*note AC_PREFIX_PROGRAM::). -- Macro: AC_PROG_CC_C89 Now done by ‘AC_PROG_CC’ (*note AC_PROG_CC::). -- Macro: AC_PROG_CC_C99 Now done by ‘AC_PROG_CC’ (*note AC_PROG_CC::). -- Macro: AC_PROG_CC_STDC Now done by ‘AC_PROG_CC’ (*note AC_PROG_CC::). -- Macro: AC_PROGRAMS_CHECK Replaced by ‘AC_CHECK_PROGS’ (*note AC_CHECK_PROGS::). -- Macro: AC_PROGRAMS_PATH Replaced by ‘AC_PATH_PROGS’ (*note AC_PATH_PROGS::). -- Macro: AC_PROGRAM_CHECK Replaced by ‘AC_CHECK_PROG’ (*note AC_CHECK_PROG::). -- Macro: AC_PROGRAM_EGREP Replaced by ‘AC_EGREP_CPP’ (*note AC_EGREP_CPP::). -- Macro: AC_PROGRAM_PATH Replaced by ‘AC_PATH_PROG’ (*note AC_PATH_PROG::). -- Macro: AC_REMOTE_TAPE Removed because of limited usefulness. -- Macro: AC_RESTARTABLE_SYSCALLS This macro was renamed ‘AC_SYS_RESTARTABLE_SYSCALLS’. However, these days portable programs should use ‘sigaction’ with ‘SA_RESTART’ if they want restartable system calls. They should not rely on ‘HAVE_RESTARTABLE_SYSCALLS’, since nowadays whether a system call is restartable is a dynamic issue, not a configuration-time issue. 
-- Macro: AC_RETSIGTYPE Replaced by ‘AC_TYPE_SIGNAL’ (*note AC_TYPE_SIGNAL::), which itself is obsolete when assuming C89 or better. -- Macro: AC_RSH Removed because of limited usefulness. -- Macro: AC_SCO_INTL If on SCO Unix, add ‘-lintl’ to output variable ‘LIBS’. This macro used to do this: AC_CHECK_LIB([intl], [strftime], [LIBS="-lintl $LIBS"]) Now it just calls ‘AC_FUNC_STRFTIME’ instead (*note AC_FUNC_STRFTIME::). -- Macro: AC_SETVBUF_REVERSED Replaced by AC_FUNC_SETVBUF_REVERSED *Note AC_FUNC_SETVBUF_REVERSED::. -- Macro: AC_SET_MAKE Replaced by ‘AC_PROG_MAKE_SET’ (*note AC_PROG_MAKE_SET::). -- Macro: AC_SIZEOF_TYPE Replaced by ‘AC_CHECK_SIZEOF’ (*note AC_CHECK_SIZEOF::). -- Macro: AC_SIZE_T Replaced by ‘AC_TYPE_SIZE_T’ (*note AC_TYPE_SIZE_T::). -- Macro: AC_STAT_MACROS_BROKEN Replaced by ‘AC_HEADER_STAT’ (*note AC_HEADER_STAT::). -- Macro: AC_STDC_HEADERS Replaced by ‘AC_HEADER_STDC’ (*note AC_HEADER_STDC::), which is itself obsolete. Nowadays it is safe to assume the facilities of C90 exist. -- Macro: AC_STRCOLL Replaced by ‘AC_FUNC_STRCOLL’ (*note AC_FUNC_STRCOLL::). -- Macro: AC_STRUCT_ST_BLKSIZE If ‘struct stat’ contains an ‘st_blksize’ member, define ‘HAVE_STRUCT_STAT_ST_BLKSIZE’. The former name, ‘HAVE_ST_BLKSIZE’ is to be avoided, as its support will cease in the future. This macro is obsoleted, and should be replaced by AC_CHECK_MEMBERS([struct stat.st_blksize]) *Note AC_CHECK_MEMBERS::. -- Macro: AC_STRUCT_ST_RDEV If ‘struct stat’ contains an ‘st_rdev’ member, define ‘HAVE_STRUCT_STAT_ST_RDEV’. The former name for this macro, ‘HAVE_ST_RDEV’, is to be avoided as it will cease to be supported in the future. Actually, even the new macro is obsolete and should be replaced by: AC_CHECK_MEMBERS([struct stat.st_rdev]) *Note AC_CHECK_MEMBERS::. -- Macro: AC_ST_BLKSIZE Replaced by ‘AC_CHECK_MEMBERS’ (*note AC_CHECK_MEMBERS::). -- Macro: AC_ST_BLOCKS Replaced by ‘AC_STRUCT_ST_BLOCKS’ (*note AC_STRUCT_ST_BLOCKS::). 
-- Macro: AC_ST_RDEV Replaced by ‘AC_CHECK_MEMBERS’ (*note AC_CHECK_MEMBERS::). -- Macro: AC_SYS_RESTARTABLE_SYSCALLS If the system automatically restarts a system call that is interrupted by a signal, define ‘HAVE_RESTARTABLE_SYSCALLS’. This macro does not check whether system calls are restarted in general—it checks whether a signal handler installed with ‘signal’ (but not ‘sigaction’) causes system calls to be restarted. It does not check whether system calls can be restarted when interrupted by signals that have no handler. These days portable programs should use ‘sigaction’ with ‘SA_RESTART’ if they want restartable system calls. They should not rely on ‘HAVE_RESTARTABLE_SYSCALLS’, since nowadays whether a system call is restartable is a dynamic issue, not a configuration-time issue. -- Macro: AC_SYS_SIGLIST_DECLARED This macro was renamed ‘AC_DECL_SYS_SIGLIST’. However, even that name is obsolete, as the same functionality is now achieved via ‘AC_CHECK_DECLS’ (*note AC_CHECK_DECLS::). -- Macro: AC_TEST_CPP This macro was renamed ‘AC_TRY_CPP’, which in turn was replaced by ‘AC_PREPROC_IFELSE’ (*note AC_PREPROC_IFELSE::). -- Macro: AC_TEST_PROGRAM This macro was renamed ‘AC_TRY_RUN’, which in turn was replaced by ‘AC_RUN_IFELSE’ (*note AC_RUN_IFELSE::). -- Macro: AC_TIMEZONE Replaced by ‘AC_STRUCT_TIMEZONE’ (*note AC_STRUCT_TIMEZONE::). -- Macro: AC_TIME_WITH_SYS_TIME Replaced by ‘AC_HEADER_TIME’ (*note AC_HEADER_TIME::), which is itself obsolete; nowadays one need only do ‘AC_CHECK_HEADERS([sys/time.h])’. -- Macro: AC_TRY_COMPILE (INCLUDES, FUNCTION-BODY, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Same as: AC_COMPILE_IFELSE( [AC_LANG_PROGRAM([[INCLUDES]], [[FUNCTION-BODY]])], [ACTION-IF-TRUE], [ACTION-IF-FALSE]) *Note Running the Compiler::. This macro double quotes both INCLUDES and FUNCTION-BODY. 
For C and C++, INCLUDES is any ‘#include’ statements needed by the code in FUNCTION-BODY (INCLUDES is ignored if the currently selected language is Fortran or Fortran 77). The compiler and compilation flags are determined by the current language (*note Language Choice::). -- Macro: AC_TRY_CPP (INPUT, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Same as: AC_PREPROC_IFELSE( [AC_LANG_SOURCE([[INPUT]])], [ACTION-IF-TRUE], [ACTION-IF-FALSE]) *Note Running the Preprocessor::. This macro double quotes the INPUT. -- Macro: AC_TRY_LINK (INCLUDES, FUNCTION-BODY, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) Same as: AC_LINK_IFELSE( [AC_LANG_PROGRAM([[INCLUDES]], [[FUNCTION-BODY]])], [ACTION-IF-TRUE], [ACTION-IF-FALSE]) *Note Running the Linker::. This macro double quotes both INCLUDES and FUNCTION-BODY. Depending on the current language (*note Language Choice::), create a test program to see whether a function whose body consists of FUNCTION-BODY can be compiled and linked. If the file compiles and links successfully, run shell commands ACTION-IF-FOUND, otherwise run ACTION-IF-NOT-FOUND. This macro double quotes both INCLUDES and FUNCTION-BODY. For C and C++, INCLUDES is any ‘#include’ statements needed by the code in FUNCTION-BODY (INCLUDES is ignored if the currently selected language is Fortran or Fortran 77). The compiler and compilation flags are determined by the current language (*note Language Choice::), and in addition ‘LDFLAGS’ and ‘LIBS’ are used for linking. -- Macro: AC_TRY_LINK_FUNC (FUNCTION, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) This macro is equivalent to AC_LINK_IFELSE([AC_LANG_CALL([], [FUNCTION])], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) *Note Running the Linker::. -- Macro: AC_TRY_RUN (PROGRAM, [ACTION-IF-TRUE], [ACTION-IF-FALSE], [ACTION-IF-CROSS-COMPILING = ‘AC_MSG_FAILURE’]) Same as: AC_RUN_IFELSE( [AC_LANG_SOURCE([[PROGRAM]])], [ACTION-IF-TRUE], [ACTION-IF-FALSE], [ACTION-IF-CROSS-COMPILING]) *Note Runtime::. 
-- Macro: AC_TYPE_SIGNAL If ‘signal.h’ declares ‘signal’ as returning a pointer to a function returning ‘void’, define ‘RETSIGTYPE’ to be ‘void’; otherwise, define it to be ‘int’. These days, it is portable to assume C89, and that signal handlers return ‘void’, without needing to use this macro or ‘RETSIGTYPE’. When targeting older K&R C, it is possible to define signal handlers as returning type ‘RETSIGTYPE’, and omit a return statement: RETSIGTYPE hup_handler () { ... } -- Macro: AC_UID_T Replaced by ‘AC_TYPE_UID_T’ (*note AC_TYPE_UID_T::). -- Macro: AC_UNISTD_H Same as ‘AC_CHECK_HEADERS([unistd.h])’ (*note AC_CHECK_HEADERS::), which is one of the tests done as a side effect by ‘AC_INCLUDES_DEFAULT’ (*note Default Includes::), so usually unnecessary to write explicitly. -- Macro: AC_USG Define ‘USG’ if the BSD string functions (‘bcopy’, ‘bzero’, ‘index’, ‘rindex’, etc) are _not_ defined in ‘strings.h’. Modern code should assume ‘string.h’ exists and should use the ISO C string functions (‘memmove’, ‘memset’, ‘strchr’, ‘strrchr’, etc) unconditionally. ‘strings.h’ may be the only header that declares ‘strcasecmp’, ‘strncasecmp’, and ‘ffs’. ‘AC_INCLUDES_DEFAULT’ checks for it (*note Default Includes::); test ‘HAVE_STRINGS_H’. -- Macro: AC_UTIME_NULL Replaced by ‘AC_FUNC_UTIME_NULL’ (*note AC_FUNC_UTIME_NULL::). -- Macro: AC_VALIDATE_CACHED_SYSTEM_TUPLE ([CMD]) If the cache file is inconsistent with the current host, target and build system types, it used to execute CMD or print a default error message. This is now handled by default. -- Macro: AC_VERBOSE (RESULT-DESCRIPTION) Replaced by ‘AC_MSG_RESULT’ (*note AC_MSG_RESULT::). -- Macro: AC_VFORK Replaced by ‘AC_FUNC_FORK’ (*note AC_FUNC_FORK::). -- Macro: AC_VPRINTF Replaced by ‘AC_FUNC_VPRINTF’ (*note AC_FUNC_VPRINTF::). -- Macro: AC_WAIT3 This macro was renamed ‘AC_FUNC_WAIT3’. However, these days portable programs should use ‘waitpid’, not ‘wait3’, as ‘wait3’ has been removed from Posix. 
-- Macro: AC_WARN Replaced by ‘AC_MSG_WARN’ (*note AC_MSG_WARN::). -- Macro: AC_WARNING (MESSAGE) Replaced by ‘m4_warn’ (*note m4_warn::). -- Macro: AC_WITH (PACKAGE, ACTION-IF-GIVEN, [ACTION-IF-NOT-GIVEN]) This is an obsolete version of ‘AC_ARG_WITH’ that does not support providing a help string (*note AC_ARG_WITH::). -- Macro: AC_WORDS_BIGENDIAN Replaced by ‘AC_C_BIGENDIAN’ (*note AC_C_BIGENDIAN::). -- Macro: AC_XENIX_DIR This macro used to add ‘-lx’ to output variable ‘LIBS’ if on Xenix. Also, if ‘dirent.h’ is being checked for, added ‘-ldir’ to ‘LIBS’. Now it is merely an alias of ‘AC_HEADER_DIRENT’ instead, plus some code to detect whether running XENIX on which you should not depend: AC_MSG_CHECKING([for Xenix]) AC_EGREP_CPP([yes], [#if defined M_XENIX && !defined M_UNIX yes #endif], [AC_MSG_RESULT([yes]); XENIX=yes], [AC_MSG_RESULT([no]); XENIX=]) Don’t use this macro, the dignified means to check the nature of the host is using ‘AC_CANONICAL_HOST’ (*note Canonicalizing::). -- Macro: AC_YYTEXT_POINTER This macro was renamed ‘AC_DECL_YYTEXT’, which in turn was integrated into ‘AC_PROG_LEX’ (*note AC_PROG_LEX::).  File: autoconf.info, Node: Autoconf 1, Next: Autoconf 2.13, Prev: Obsolete Macros, Up: Obsolete Constructs 18.5 Upgrading From Version 1 ============================= Autoconf version 2 is mostly backward compatible with version 1. However, it introduces better ways to do some things, and doesn’t support some of the ugly things in version 1. So, depending on how sophisticated your ‘configure.ac’ files are, you might have to do some manual work in order to upgrade to version 2. This chapter points out some problems to watch for when upgrading. Also, perhaps your ‘configure’ scripts could benefit from some of the new features in version 2; the changes are summarized in the file ‘NEWS’ in the Autoconf distribution. 
* Menu: * Changed File Names:: Files you might rename * Changed Makefiles:: New things to put in ‘Makefile.in’ * Changed Macros:: Macro calls you might replace * Changed Results:: Changes in how to check test results * Changed Macro Writing:: Better ways to write your own macros  File: autoconf.info, Node: Changed File Names, Next: Changed Makefiles, Up: Autoconf 1 18.5.1 Changed File Names ------------------------- If you have an ‘aclocal.m4’ installed with Autoconf (as opposed to in a particular package’s source directory), you must rename it to ‘acsite.m4’. *Note autoconf Invocation::. If you distribute ‘install.sh’ with your package, rename it to ‘install-sh’ so ‘make’ builtin rules don’t inadvertently create a file called ‘install’ from it. ‘AC_PROG_INSTALL’ looks for the script under both names, but it is best to use the new name. If you were using ‘config.h.top’, ‘config.h.bot’, or ‘acconfig.h’, you still can, but you have less clutter if you use the ‘AH_’ macros. *Note Autoheader Macros::.  File: autoconf.info, Node: Changed Makefiles, Next: Changed Macros, Prev: Changed File Names, Up: Autoconf 1 18.5.2 Changed Makefiles ------------------------ Add ‘@CFLAGS@’, ‘@CPPFLAGS@’, and ‘@LDFLAGS@’ in your ‘Makefile.in’ files, so they can take advantage of the values of those variables in the environment when ‘configure’ is run. Doing this isn’t necessary, but it’s a convenience for users. Also add ‘@configure_input@’ in a comment to each input file for ‘AC_OUTPUT’, so that the output files contain a comment saying they were produced by ‘configure’. Automatically selecting the right comment syntax for all the kinds of files that people call ‘AC_OUTPUT’ on became too much work. Add ‘config.log’ and ‘config.cache’ to the list of files you remove in ‘distclean’ targets. 
If you have the following in ‘Makefile.in’: prefix = /usr/local exec_prefix = $(prefix) you must change it to: prefix = @prefix@ exec_prefix = @exec_prefix@ The old behavior of replacing those variables without ‘@’ characters around them has been removed.  File: autoconf.info, Node: Changed Macros, Next: Changed Results, Prev: Changed Makefiles, Up: Autoconf 1 18.5.3 Changed Macros --------------------- Many of the macros were renamed in Autoconf version 2. You can still use the old names, but the new ones are clearer, and it’s easier to find the documentation for them. *Note Obsolete Macros::, for a table showing the new names for the old macros. Use the ‘autoupdate’ program to convert your ‘configure.ac’ to using the new macro names. *Note autoupdate Invocation::. Some macros have been superseded by similar ones that do the job better, but are not call-compatible. If you get warnings about calling obsolete macros while running ‘autoconf’, you may safely ignore them, but your ‘configure’ script generally works better if you follow the advice that is printed about what to replace the obsolete macros with. In particular, the mechanism for reporting the results of tests has changed. If you were using ‘echo’ or ‘AC_VERBOSE’ (perhaps via ‘AC_COMPILE_CHECK’), your ‘configure’ script’s output looks better if you switch to ‘AC_MSG_CHECKING’ and ‘AC_MSG_RESULT’. *Note Printing Messages::. Those macros work best in conjunction with cache variables. *Note Caching Results::.  File: autoconf.info, Node: Changed Results, Next: Changed Macro Writing, Prev: Changed Macros, Up: Autoconf 1 18.5.4 Changed Results ---------------------- If you were checking the results of previous tests by examining the shell variable ‘DEFS’, you need to switch to checking the values of the cache variables for those tests. ‘DEFS’ no longer exists while ‘configure’ is running; it is only created when generating output files. 
This difference from version 1 is because properly quoting the contents of that variable turned out to be too cumbersome and inefficient to do every time ‘AC_DEFINE’ is called. *Note Cache Variable Names::. For example, here is a ‘configure.ac’ fragment written for Autoconf version 1: AC_HAVE_FUNCS(syslog) case "$DEFS" in *-DHAVE_SYSLOG*) ;; *) # syslog is not in the default libraries. See if it's in some other. saved_LIBS="$LIBS" for lib in bsd socket inet; do AC_CHECKING(for syslog in -l$lib) LIBS="-l$lib $saved_LIBS" AC_HAVE_FUNCS(syslog) case "$DEFS" in *-DHAVE_SYSLOG*) break ;; *) ;; esac LIBS="$saved_LIBS" done ;; esac Here is a way to write it for version 2: AC_CHECK_FUNCS([syslog]) if test "x$ac_cv_func_syslog" = xno; then # syslog is not in the default libraries. See if it's in some other. for lib in bsd socket inet; do AC_CHECK_LIB([$lib], [syslog], [AC_DEFINE([HAVE_SYSLOG]) LIBS="-l$lib $LIBS"; break]) done fi If you were working around bugs in ‘AC_DEFINE_UNQUOTED’ by adding backslashes before quotes, you need to remove them. It now works predictably, and does not treat quotes (except back quotes) specially. *Note Setting Output Variables::. All of the Boolean shell variables set by Autoconf macros now use ‘yes’ for the true value. Most of them use ‘no’ for false, though for backward compatibility some use the empty string instead. If you were relying on a shell variable being set to something like 1 or ‘t’ for true, you need to change your tests.  File: autoconf.info, Node: Changed Macro Writing, Prev: Changed Results, Up: Autoconf 1 18.5.5 Changed Macro Writing ---------------------------- When defining your own macros, you should now use ‘AC_DEFUN’ instead of ‘define’. ‘AC_DEFUN’ automatically calls ‘AC_PROVIDE’ and ensures that macros called via ‘AC_REQUIRE’ do not interrupt other macros, to prevent nested ‘checking...’ messages on the screen. There’s no actual harm in continuing to use the older way, but it’s less convenient and attractive. 
*Note Macro Definitions::. You probably looked at the macros that came with Autoconf as a guide for how to do things. It would be a good idea to take a look at the new versions of them, as the style is somewhat improved and they take advantage of some new features. If you were doing tricky things with undocumented Autoconf internals (macros, variables, diversions), check whether you need to change anything to account for changes that have been made. Perhaps you can even use an officially supported technique in version 2 instead of kludging. Or perhaps not. To speed up your locally written feature tests, add caching to them. See whether any of your tests are of general enough usefulness to encapsulate them into macros that you can share.  File: autoconf.info, Node: Autoconf 2.13, Prev: Autoconf 1, Up: Obsolete Constructs 18.6 Upgrading From Version 2.13 ================================ The introduction of the previous section (*note Autoconf 1::) perfectly suits this section... Autoconf version 2.50 is mostly backward compatible with version 2.13. However, it introduces better ways to do some things, and doesn’t support some of the ugly things in version 2.13. So, depending on how sophisticated your ‘configure.ac’ files are, you might have to do some manual work in order to upgrade to version 2.50. This chapter points out some problems to watch for when upgrading. Also, perhaps your ‘configure’ scripts could benefit from some of the new features in version 2.50; the changes are summarized in the file ‘NEWS’ in the Autoconf distribution. 
* Menu: * Changed Quotation:: Broken code which used to work * New Macros:: Interaction with foreign macros * Hosts and Cross-Compilation:: Bugward compatibility kludges * AC_LIBOBJ vs LIBOBJS:: LIBOBJS is a forbidden token * AC_ACT_IFELSE vs AC_TRY_ACT:: A more generic scheme for testing sources  File: autoconf.info, Node: Changed Quotation, Next: New Macros, Up: Autoconf 2.13 18.6.1 Changed Quotation ------------------------ The most important changes are invisible to you: the implementation of most macros have completely changed. This allowed more factorization of the code, better error messages, a higher uniformity of the user’s interface etc. Unfortunately, as a side effect, some construct which used to (miraculously) work might break starting with Autoconf 2.50. The most common culprit is bad quotation. For instance, in the following example, the message is not properly quoted: AC_INIT AC_CHECK_HEADERS(foo.h, , AC_MSG_ERROR(cannot find foo.h, bailing out)) AC_OUTPUT Autoconf 2.13 simply ignores it: $ autoconf-2.13; ./configure --silent creating cache ./config.cache configure: error: cannot find foo.h $ while Autoconf 2.50 produces a broken ‘configure’: $ autoconf-2.50; ./configure --silent configure: error: cannot find foo.h ./configure: exit: bad non-numeric arg `bailing' ./configure: exit: bad non-numeric arg `bailing' $ The message needs to be quoted, and the ‘AC_MSG_ERROR’ invocation too! AC_INIT([Example], [1.0], [bug-example@example.org]) AC_CHECK_HEADERS([foo.h], [], [AC_MSG_ERROR([cannot find foo.h, bailing out])]) AC_OUTPUT Many many (and many more) Autoconf macros were lacking proper quotation, including no less than... ‘AC_DEFUN’ itself! 
$ cat configure.in AC_DEFUN([AC_PROG_INSTALL], [# My own much better version ]) AC_INIT AC_PROG_INSTALL AC_OUTPUT $ autoconf-2.13 autoconf: Undefined macros: ***BUG in Autoconf--please report*** AC_FD_MSG ***BUG in Autoconf--please report*** AC_EPI configure.in:1:AC_DEFUN([AC_PROG_INSTALL], configure.in:5:AC_PROG_INSTALL $ autoconf-2.50 $  File: autoconf.info, Node: New Macros, Next: Hosts and Cross-Compilation, Prev: Changed Quotation, Up: Autoconf 2.13 18.6.2 New Macros ----------------- While Autoconf was relatively dormant in the late 1990s, Automake provided Autoconf-like macros for a while. Starting with Autoconf 2.50 in 2001, Autoconf provided versions of these macros, integrated in the ‘AC_’ namespace, instead of ‘AM_’. But in order to ease the upgrading via ‘autoupdate’, bindings to such ‘AM_’ macros are provided. Unfortunately older versions of Automake (e.g., Automake 1.4) did not quote the names of these macros. Therefore, when ‘m4’ finds something like ‘AC_DEFUN(AM_TYPE_PTRDIFF_T, ...)’ in ‘aclocal.m4’, ‘AM_TYPE_PTRDIFF_T’ is expanded, replaced with its Autoconf definition. Fortunately Autoconf catches pre-‘AC_INIT’ expansions, and complains, in its own words: $ cat configure.ac AC_INIT([Example], [1.0], [bug-example@example.org]) AM_TYPE_PTRDIFF_T $ aclocal-1.4 $ autoconf aclocal.m4:17: error: m4_defn: undefined macro: _m4_divert_diversion aclocal.m4:17: the top level autom4te: m4 failed with exit status: 1 $ Modern versions of Automake no longer define most of these macros, and properly quote the names of the remaining macros. 
If you must use an old Automake, do not depend upon macros from Automake as it is simply not its job to provide macros (but the one it requires itself): $ cat configure.ac AC_INIT([Example], [1.0], [bug-example@example.org]) AM_TYPE_PTRDIFF_T $ rm aclocal.m4 $ autoupdate autoupdate: `configure.ac' is updated $ cat configure.ac AC_INIT([Example], [1.0], [bug-example@example.org]) AC_CHECK_TYPES([ptrdiff_t]) $ aclocal-1.4 $ autoconf $  File: autoconf.info, Node: Hosts and Cross-Compilation, Next: AC_LIBOBJ vs LIBOBJS, Prev: New Macros, Up: Autoconf 2.13 18.6.3 Hosts and Cross-Compilation ---------------------------------- Based on the experience of compiler writers, and after long public debates, many aspects of the cross-compilation chain have changed: − the relationship between the build, host, and target architecture types, − the command line interface for specifying them to ‘configure’, − the variables defined in ‘configure’, − the enabling of cross-compilation mode. The relationship between build, host, and target have been cleaned up: the chain of default is now simply: target defaults to host, host to build, and build to the result of ‘config.guess’. Nevertheless, in order to ease the transition from 2.13 to 2.50, the following transition scheme is implemented. _Do not rely on it_, as it will be completely disabled in a couple of releases (we cannot keep it, as it proves to cause more problems than it cures). They all default to the result of running ‘config.guess’, unless you specify either ‘--build’ or ‘--host’. In this case, the default becomes the system type you specified. If you specify both, and they’re different, ‘configure’ enters cross compilation mode, so it doesn’t run any tests that require execution. Hint: if you mean to override the result of ‘config.guess’, prefer ‘--build’ over ‘--host’. For backward compatibility, ‘configure’ accepts a system type as an option by itself. 
Such an option overrides the defaults for build, host, and target system types. The following configure statement configures a cross toolchain that runs on NetBSD/alpha but generates code for GNU Hurd/sparc, which is also the build platform. ./configure --host=alpha-netbsd sparc-gnu In Autoconf 2.13 and before, the variables ‘build’, ‘host’, and ‘target’ had a different semantics before and after the invocation of ‘AC_CANONICAL_BUILD’ etc. Now, the argument of ‘--build’ is strictly copied into ‘build_alias’, and is left empty otherwise. After the ‘AC_CANONICAL_BUILD’, ‘build’ is set to the canonicalized build type. To ease the transition, before, its contents is the same as that of ‘build_alias’. Do _not_ rely on this broken feature. For consistency with the backward compatibility scheme exposed above, when ‘--host’ is specified but ‘--build’ isn’t, the build system is assumed to be the same as ‘--host’, and ‘build_alias’ is set to that value. Eventually, this historically incorrect behavior will go away. The former scheme to enable cross-compilation proved to cause more harm than good, in particular, it used to be triggered too easily, leaving regular end users puzzled in front of cryptic error messages. ‘configure’ could even enter cross-compilation mode only because the compiler was not functional. This is mainly because ‘configure’ used to try to detect cross-compilation, instead of waiting for an explicit flag from the user. Now, ‘configure’ enters cross-compilation mode if and only if ‘--host’ is passed. That’s the short documentation. To ease the transition between 2.13 and its successors, a more complicated scheme is implemented. _Do not rely on the following_, as it will be removed in the near future. If you specify ‘--host’, but not ‘--build’, when ‘configure’ performs the first compiler test it tries to run an executable produced by the compiler. If the execution fails, it enters cross-compilation mode. This is fragile. 
Moreover, by the time the compiler test is performed, it may be too late to modify the build-system type: other tests may have already been performed. Therefore, whenever you specify ‘--host’, be sure to specify ‘--build’ too. ./configure --build=x86_64-pc-linux-gnu --host=x86_64-w64-mingw64 enters cross-compilation mode. The former interface, which consisted in setting the compiler to a cross-compiler without informing ‘configure’ is obsolete. For instance, ‘configure’ fails if it can’t run the code generated by the specified compiler if you configure as follows: ./configure CC=x86_64-w64-mingw64-gcc  File: autoconf.info, Node: AC_LIBOBJ vs LIBOBJS, Next: AC_ACT_IFELSE vs AC_TRY_ACT, Prev: Hosts and Cross-Compilation, Up: Autoconf 2.13 18.6.4 ‘AC_LIBOBJ’ vs. ‘LIBOBJS’ -------------------------------- Up to Autoconf 2.13, the replacement of functions was triggered via the variable ‘LIBOBJS’. Since Autoconf 2.50, the macro ‘AC_LIBOBJ’ should be used instead (*note Generic Functions::). Starting at Autoconf 2.53, the use of ‘LIBOBJS’ is an error. This change is mandated by the unification of the GNU Build System components. In particular, the various fragile techniques used to parse a ‘configure.ac’ are all replaced with the use of traces. As a consequence, any action must be traceable, which obsoletes critical variable assignments. Fortunately, ‘LIBOBJS’ was the only problem, and it can even be handled gracefully (read, “without your having to change something”). There were two typical uses of ‘LIBOBJS’: asking for a replacement function, and adjusting ‘LIBOBJS’ for Automake and/or Libtool. As for function replacement, the fix is immediate: use ‘AC_LIBOBJ’. For instance: LIBOBJS="$LIBOBJS fnmatch.o" LIBOBJS="$LIBOBJS malloc.$ac_objext" should be replaced with: AC_LIBOBJ([fnmatch]) AC_LIBOBJ([malloc]) When used with Automake 1.10 or newer, a suitable value for ‘LIBOBJDIR’ is set so that the ‘LIBOBJS’ and ‘LTLIBOBJS’ can be referenced from any ‘Makefile.am’. 
Even without Automake, arranging for ‘LIBOBJDIR’ to be set correctly enables referencing ‘LIBOBJS’ and ‘LTLIBOBJS’ in another directory. The ‘LIBOBJDIR’ feature is experimental.  File: autoconf.info, Node: AC_ACT_IFELSE vs AC_TRY_ACT, Prev: AC_LIBOBJ vs LIBOBJS, Up: Autoconf 2.13 18.6.5 ‘AC_ACT_IFELSE’ vs. ‘AC_TRY_ACT’ --------------------------------------- Since Autoconf 2.50, internal codes uses ‘AC_PREPROC_IFELSE’, ‘AC_COMPILE_IFELSE’, ‘AC_LINK_IFELSE’, and ‘AC_RUN_IFELSE’ on one hand and ‘AC_LANG_SOURCE’, and ‘AC_LANG_PROGRAM’ on the other hand instead of the deprecated ‘AC_TRY_CPP’, ‘AC_TRY_COMPILE’, ‘AC_TRY_LINK’, and ‘AC_TRY_RUN’. The motivations where: − a more consistent interface: ‘AC_TRY_COMPILE’ etc. were double quoting their arguments; − the combinatorial explosion is solved by decomposing on the one hand the generation of sources, and on the other hand executing the program; − this scheme helps supporting more languages than plain C and C++. In addition to the change of syntax, the philosophy has changed too: while emphasis was put on speed at the expense of accuracy, today’s Autoconf promotes accuracy of the testing framework at, ahem..., the expense of speed. As a perfect example of what is _not_ to be done, here is how to find out whether a header file contains a particular declaration, such as a typedef, a structure, a structure member, or a function. Use ‘AC_EGREP_HEADER’ instead of running ‘grep’ directly on the header file; on some systems the symbol might be defined in another header file that the file you are checking includes. As a (bad) example, here is how you should not check for C preprocessor symbols, either defined by header files or predefined by the C preprocessor: using ‘AC_EGREP_CPP’: AC_EGREP_CPP(yes, [#ifdef _AIX yes #endif ], is_aix=yes, is_aix=no) The above example, properly written would (i) use ‘AC_LANG_PROGRAM’, and (ii) run the compiler: AC_COMPILE_IFELSE([AC_LANG_PROGRAM( [[#ifndef _AIX error: This isn't AIX! 
#endif ]])], [is_aix=yes], [is_aix=no])  File: autoconf.info, Node: Using Autotest, Next: FAQ, Prev: Obsolete Constructs, Up: Top 19 Generating Test Suites with Autotest *************************************** *N.B.: This section describes a feature which is still stabilizing. Although we believe that Autotest is useful as-is, this documentation describes an interface which might change in the future: do not depend upon Autotest without subscribing to the Autoconf mailing lists.* It is paradoxical that portable projects depend on nonportable tools to run their test suite. Autoconf by itself is the paragon of this problem: although it aims at perfectly portability, up to 2.13 its test suite was using DejaGNU, a rich and complex testing framework, but which is far from being standard on Posix systems. Worse yet, it was likely to be missing on the most fragile platforms, the very platforms that are most likely to torture Autoconf and exhibit deficiencies. To circumvent this problem, many package maintainers have developed their own testing framework, based on simple shell scripts whose sole outputs are exit status values describing whether the test succeeded. Most of these tests share common patterns, and this can result in lots of duplicated code and tedious maintenance. Following exactly the same reasoning that yielded to the inception of Autoconf, Autotest provides a test suite generation framework, based on M4 macros building a portable shell script. The suite itself is equipped with automatic logging and tracing facilities which greatly diminish the interaction with bug reporters, and simple timing reports. Autoconf itself has been using Autotest for years, and we do attest that it has considerably improved the strength of the test suite and the quality of bug reports. 
Other projects are known to use some generation of Autotest, such as Bison, GNU Wdiff, GNU Tar, each of them with different needs, and this usage has validated Autotest as a general testing framework. Nonetheless, compared to DejaGNU, Autotest is inadequate for interactive tool testing, which is probably its main limitation. * Menu: * Using an Autotest Test Suite:: Autotest and the user * Writing Testsuites:: Autotest macros * testsuite Invocation:: Running ‘testsuite’ scripts * Making testsuite Scripts:: Using autom4te to create ‘testsuite’  File: autoconf.info, Node: Using an Autotest Test Suite, Next: Writing Testsuites, Up: Using Autotest 19.1 Using an Autotest Test Suite ================================= * Menu: * testsuite Scripts:: The concepts of Autotest * Autotest Logs:: Their contents  File: autoconf.info, Node: testsuite Scripts, Next: Autotest Logs, Up: Using an Autotest Test Suite 19.1.1 ‘testsuite’ Scripts -------------------------- Generating testing or validation suites using Autotest is rather easy. The whole validation suite is held in a file to be processed through ‘autom4te’, itself using GNU M4 under the hood, to produce a stand-alone Bourne shell script which then gets distributed. Neither ‘autom4te’ nor GNU M4 are needed at the installer’s end. Each test of the validation suite should be part of some test group. A “test group” is a sequence of interwoven tests that ought to be executed together, usually because one test in the group creates data files that a later test in the same group needs to read. Complex test groups make later debugging more tedious. It is much better to keep only a few tests per test group. Ideally there is only one test per test group. For all but the simplest packages, some file such as ‘testsuite.at’ does not fully hold all test sources, as these are often easier to maintain in separate files. 
Each of these separate files holds a single test group, or a sequence of test groups all addressing some common functionality in the package. In such cases, ‘testsuite.at’ merely initializes the validation suite, and sometimes does elementary health checking, before listing include statements for all other test files. The special file ‘package.m4’, containing the identification of the package, is automatically included if found. A convenient alternative consists in moving all the global issues (local Autotest macros, elementary health checking, and ‘AT_INIT’ invocation) into the file ‘local.at’, and making ‘testsuite.at’ be a simple list of ‘m4_include’s of sub test suites. In such case, generating the whole test suite or pieces of it is only a matter of choosing the ‘autom4te’ command line arguments. The validation scripts that Autotest produces are by convention called ‘testsuite’. When run, ‘testsuite’ executes each test group in turn, producing only one summary line per test to say if that particular test succeeded or failed. At end of all tests, summarizing counters get printed. One debugging directory is left for each test group which failed, if any: such directories are named ‘testsuite.dir/NN’, where NN is the sequence number of the test group, and they include: • a debugging script named ‘run’ which reruns the test in “debug mode” (*note testsuite Invocation::). The automatic generation of debugging scripts has the purpose of easing the chase for bugs. • all the files created with ‘AT_DATA’ • all the Erlang source code files created with ‘AT_CHECK_EUNIT’ • a log of the run, named ‘testsuite.log’ In the ideal situation, none of the tests fail, and consequently no debugging directory is left behind for validation. It often happens in practice that individual tests in the validation suite need to get information coming out of the configuration process. 
Some of this information, common for all validation suites, is provided through the file ‘atconfig’, automatically created by ‘AC_CONFIG_TESTDIR’. For configuration information which your testing environment specifically needs, you might prepare an optional file named ‘atlocal.in’, instantiated by ‘AC_CONFIG_FILES’. The configuration process produces ‘atconfig’ and ‘atlocal’ out of these two input files, and these two produced files are automatically read by the ‘testsuite’ script. Here is a diagram showing the relationship between files. Files used in preparing a software package for distribution: [package.m4] -->. \ subfile-1.at ->. [local.at] ---->+ ... \ \ subfile-i.at ---->-- testsuite.at -->-- autom4te* -->testsuite ... / subfile-n.at ->' Files used in configuring a software package: .--> atconfig / [atlocal.in] --> config.status* --< \ `--> [atlocal] Files created during test suite execution: atconfig -->. .--> testsuite.log \ / >-- testsuite* --< / \ [atlocal] ->' `--> [testsuite.dir]  File: autoconf.info, Node: Autotest Logs, Prev: testsuite Scripts, Up: Using an Autotest Test Suite 19.1.2 Autotest Logs -------------------- When run, the test suite creates a log file named after itself, e.g., a test suite named ‘testsuite’ creates ‘testsuite.log’. It contains a lot of information, usually more than maintainers actually need, but therefore most of the time it contains all that is needed: command line arguments A bad but unfortunately widespread habit consists of setting environment variables before the command, such as in ‘CC=my-home-grown-cc ./testsuite’. The test suite does not know this change, hence (i) it cannot report it to you, and (ii) it cannot preserve the value of ‘CC’ for subsequent runs. Autoconf faced exactly the same problem, and solved it by asking users to pass the variable definitions as command line arguments. 
Autotest requires this rule, too, but has no means to enforce it; the log then contains a trace of the variables that were changed by the user. ‘ChangeLog’ excerpts The topmost lines of all the ‘ChangeLog’ files found in the source hierarchy. This is especially useful when bugs are reported against development versions of the package, since the version string does not provide sufficient information to know the exact state of the sources the user compiled. Of course, this relies on the use of a ‘ChangeLog’. build machine Running a test suite in a cross-compile environment is not an easy task, since it would mean having the test suite run on a machine BUILD, while running programs on a machine HOST. It is much simpler to run both the test suite and the programs on HOST, but then, from the point of view of the test suite, there remains a single environment, HOST = BUILD. The log contains relevant information on the state of the BUILD machine, including some important environment variables. tested programs The absolute file name and answers to ‘--version’ of the tested programs (see *note Writing Testsuites::, ‘AT_TESTED’). configuration log The contents of ‘config.log’, as created by ‘configure’, are appended. It contains the configuration flags and a detailed report on the configuration itself.  File: autoconf.info, Node: Writing Testsuites, Next: testsuite Invocation, Prev: Using an Autotest Test Suite, Up: Using Autotest 19.2 Writing ‘testsuite.at’ =========================== The ‘testsuite.at’ is a Bourne shell script making use of special Autotest M4 macros. It often contains a call to ‘AT_INIT’ near its beginning followed by one call to ‘m4_include’ per source file for tests. Each such included file, or the remainder of ‘testsuite.at’ if include files are not used, contain a sequence of test groups. Each test group begins with a call to ‘AT_SETUP’, then an arbitrary number of shell commands or calls to ‘AT_CHECK’, and then completes with a call to ‘AT_CLEANUP’. 
Multiple test groups can be categorized by a call to ‘AT_BANNER’. All of the public Autotest macros have all-uppercase names in the namespace ‘^AT_’ to prevent them from accidentally conflicting with other text; Autoconf also reserves the namespace ‘^_AT_’ for internal macros. All shell variables used in the testsuite for internal purposes have mostly-lowercase names starting with ‘at_’. Autotest also uses here-document delimiters in the namespace ‘^_AT[A-Z]’, and makes use of the file system namespace ‘^at-’. Since Autoconf is built on top of M4sugar (*note Programming in M4sugar::) and M4sh (*note Programming in M4sh::), you must also be aware of those namespaces (‘^_?\(m4\|AS\)_’). In general, you _should not use_ the namespace of a package that does not own the macro or shell code you are writing. -- Macro: AT_INIT ([NAME]) Initialize Autotest. Giving a NAME to the test suite is encouraged if your package includes several test suites. Before this macro is called, ‘AT_PACKAGE_STRING’ and ‘AT_PACKAGE_BUGREPORT’ must be defined, which are used to display information about the testsuite to the user. Typically, these macros are provided by a file ‘package.m4’ built by ‘make’ (*note Making testsuite Scripts::), in order to inherit the package name, version, and bug reporting address from ‘configure.ac’. -- Macro: AT_COPYRIGHT (COPYRIGHT-NOTICE) State that, in addition to the Free Software Foundation’s copyright on the Autotest macros, parts of your test suite are covered by COPYRIGHT-NOTICE. The COPYRIGHT-NOTICE shows up in both the head of ‘testsuite’ and in ‘testsuite --version’. -- Macro: AT_ARG_OPTION (OPTIONS, HELP-TEXT, [ACTION-IF-GIVEN], [ACTION-IF-NOT-GIVEN]) Accept options from the space-separated list OPTIONS, a list that has leading dashes removed from the options. Long options will be prefixed with ‘--’, single-character options with ‘-’. The first word in this list is the primary OPTION, any others are assumed to be short-hand aliases. 
The variable associated with it is ‘at_arg_OPTION’, with any dashes in OPTION replaced with underscores. If the user passes ‘--OPTION’ to the ‘testsuite’, the variable will be set to ‘:’. If the user does not pass the option, or passes ‘--no-OPTION’, then the variable will be set to ‘false’. ACTION-IF-GIVEN is run each time the option is encountered; here, the variable ‘at_optarg’ will be set to ‘:’ or ‘false’ as appropriate. ‘at_optarg’ is actually just a copy of ‘at_arg_OPTION’. ACTION-IF-NOT-GIVEN will be run once after option parsing is complete and if no option from OPTIONS was used. HELP-TEXT is added to the end of the list of options shown in ‘testsuite --help’ (*note AS_HELP_STRING::). It is recommended that you use a package-specific prefix to OPTIONS names in order to avoid clashes with future Autotest built-in options. -- Macro: AT_ARG_OPTION_ARG (OPTIONS, HELP-TEXT, [ACTION-IF-GIVEN], [ACTION-IF-NOT-GIVEN]) Accept options with arguments from the space-separated list OPTIONS, a list that has leading dashes removed from the options. Long options will be prefixed with ‘--’, single-character options with ‘-’. The first word in this list is the primary OPTION, any others are assumed to be short-hand aliases. The variable associated with it is ‘at_arg_OPTION’, with any dashes in OPTION replaced with underscores. If the user passes ‘--OPTION=ARG’ or ‘--OPTION ARG’ to the ‘testsuite’, the variable will be set to ‘ARG’. ACTION-IF-GIVEN is run each time the option is encountered; here, the variable ‘at_optarg’ will be set to ‘ARG’. ‘at_optarg’ is actually just a copy of ‘at_arg_OPTION’. ACTION-IF-NOT-GIVEN will be run once after option parsing is complete and if no option from OPTIONS was used. HELP-TEXT is added to the end of the list of options shown in ‘testsuite --help’ (*note AS_HELP_STRING::). It is recommended that you use a package-specific prefix to OPTIONS names in order to avoid clashes with future Autotest built-in options. 
-- Macro: AT_COLOR_TESTS Enable colored test results by default when the output is connected to a terminal. -- Macro: AT_TESTED (EXECUTABLES) Log the file name and answer to ‘--version’ of each program in space-separated list EXECUTABLES. Several invocations register new executables, in other words, don’t fear registering one program several times. Autotest test suites rely on ‘PATH’ to find the tested program. This avoids the need to generate absolute names of the various tools, and makes it possible to test installed programs. Therefore, knowing which programs are being exercised is crucial to understanding problems in the test suite itself, or its occasional misuses. It is a good idea to also subscribe foreign programs you depend upon, to avoid incompatible diagnostics. EXECUTABLES is implicitly wrapped in shell double quotes, but it will still use shell variable expansion (‘$’), command substitution (‘`’), and backslash escaping (‘\’). In particular, the ‘EXEEXT’ variable is available if it is passed to the testsuite via ‘atlocal’ or ‘atconfig’. -- Macro: AT_PREPARE_TESTS (SHELL-CODE) Execute SHELL-CODE in the main testsuite process, after initializing the test suite and processing command-line options, but before running any tests. If this macro is used several times, all of the SHELL-CODEs will be executed, in the order they appeared in ‘testsuite.at’. One reason to use ‘AT_PREPARE_TESTS’ is when the programs under test are sensitive to environment variables: you can unset all these variables or reset them to safe values in SHELL-CODE. SHELL-CODE is only executed if at least one test is going to be run. In particular, it will not be executed if any of the ‘--help’, ‘--version’, ‘--list’, or ‘--clean’ options are given to ‘testsuite’ (*note testsuite Invocation::). -- Macro: AT_PREPARE_EACH_TEST (SHELL-CODE) Execute SHELL-CODE in each test group’s subshell, at the point of the ‘AT_SETUP’ that starts the test group. 
-- Macro: AT_TEST_HELPER_FN (NAME, ARGS, DESCRIPTION, CODE) Define a shell function that will be available to the code for each test group. Its name will be ‘ath_fn_NAME’, and its body will be CODE. (The prefix prevents name conflicts with shell functions defined by M4sh and Autotest.) ARGS should describe the function’s arguments and DESCRIPTION what it does; these are used only for documentation comments in the generated testsuite script. -- Macro: AT_BANNER (TEST-CATEGORY-NAME) This macro identifies the start of a category of related test groups. When the resulting ‘testsuite’ is invoked with more than one test group to run, its output will include a banner containing TEST-CATEGORY-NAME prior to any tests run from that category. The banner should be no more than about 40 or 50 characters. A blank banner indicates uncategorized tests; an empty line will be inserted after tests from an earlier category, effectively ending that category. -- Macro: AT_SETUP (TEST-GROUP-NAME) This macro starts a group of related tests, all to be executed in the same subshell. It accepts a single argument, which holds a few words (no more than about 30 or 40 characters) quickly describing the purpose of the test group being started. TEST-GROUP-NAME must not expand to unbalanced quotes, although quadrigraphs can be used. -- Macro: AT_KEYWORDS (KEYWORDS) Associate the space-separated list of KEYWORDS to the enclosing test group. This makes it possible to run “slices” of the test suite. For instance, if some of your test groups exercise some ‘foo’ feature, then using ‘AT_KEYWORDS(foo)’ lets you run ‘./testsuite -k foo’ to run exclusively these test groups. The TEST-GROUP-NAME of the test group is automatically recorded to ‘AT_KEYWORDS’. Several invocations within a test group accumulate new keywords. In other words, don’t fear registering the same keyword several times in a test group. -- Macro: AT_CAPTURE_FILE (FILE) If the current test group fails, log the contents of FILE. 
Several identical calls within one test group have no additional effect. -- Macro: AT_FAIL_IF (SHELL-CONDITION) Make the test group fail and skip the rest of its execution, if SHELL-CONDITION is true. SHELL-CONDITION is a shell expression such as a ‘test’ command. Tests before ‘AT_FAIL_IF’ will be executed and may still cause the test group to be skipped. You can instantiate this macro many times from within the same test group. You should use this macro only for very simple failure conditions. If the SHELL-CONDITION could emit any kind of output you should instead use ‘AT_CHECK’ like AT_CHECK([if SHELL-CONDITION; then exit 99; fi]) so that such output is properly recorded in the ‘testsuite.log’ file. -- Macro: AT_SKIP_IF (SHELL-CONDITION) Determine whether the test should be skipped because it requires features that are unsupported on the machine under test. SHELL-CONDITION is a shell expression such as a ‘test’ command. Tests before ‘AT_SKIP_IF’ will be executed and may still cause the test group to fail. You can instantiate this macro many times from within the same test group. You should use this macro only for very simple skip conditions. If the SHELL-CONDITION could emit any kind of output you should instead use ‘AT_CHECK’ like AT_CHECK([if SHELL-CONDITION; then exit 77; fi]) so that such output is properly recorded in the ‘testsuite.log’ file. -- Macro: AT_XFAIL_IF (SHELL-CONDITION) Determine whether the test is expected to fail because it is a known bug (for unsupported features, you should skip the test). SHELL-CONDITION is a shell expression such as a ‘test’ command; you can instantiate this macro many times from within the same test group, and one of the conditions is enough to turn the test into an expected failure. -- Macro: AT_CLEANUP End the current test group. -- Macro: AT_DATA (FILE, CONTENTS) -- Macro: AT_DATA_UNQUOTED (FILE, CONTENTS) Initialize an input data FILE with given CONTENTS. 
Of course, the CONTENTS have to be properly quoted between square brackets to protect against included commas or spurious M4 expansion. CONTENTS must be empty or end with a newline. FILE must be a single shell word that expands into a single file name. The difference between ‘AT_DATA’ and ‘AT_DATA_UNQUOTED’ is that only the latter performs shell variable expansion (‘$’), command substitution (‘`’), and backslash escaping (‘\’) on CONTENTS. -- Macro: AT_CHECK (COMMANDS, [STATUS = ‘0’], [STDOUT], [STDERR], [RUN-IF-FAIL], [RUN-IF-PASS]) -- Macro: AT_CHECK_UNQUOTED (COMMANDS, [STATUS = ‘0’], [STDOUT], [STDERR], [RUN-IF-FAIL], [RUN-IF-PASS]) Perform a test, by running the shell COMMANDS in a subshell. COMMANDS is output as-is, so shell expansions are honored. These commands are expected to have a final exit status of STATUS, and to produce output as described by STDOUT and STDERR (see below). This macro must be invoked in between ‘AT_SETUP’ and ‘AT_CLEANUP’. If COMMANDS exit with unexpected status 77, then the rest of the test group is skipped. If COMMANDS exit with unexpected status 99, then the test group is immediately failed; this is called a _hard failure_. Otherwise, the test is considered to have succeeeded if all of the status, stdout, and stderr expectations were met. If RUN-IF-FAIL is nonempty, it provides extra shell commands to run when the test fails; if RUN-IF-PASS is nonempty, it provides extra shell commands to run when the test succeeds. These commands are _not_ run in a subshell, and they are not run when the test group is skipped (exit code 77) or hard-failed (exit code 99). They may change whether the test group is considered to have succeeded, by modifying the shell variable ‘at_failed’; set it to ‘:’ to indicate that the test group has failed, or ‘false’ to indicate that it has succeeded. The exit status of COMMANDS is available to RUN-IF-FAIL and RUN-IF-PASS commands in the ‘at_status’ shell variable. 
The output from COMMANDS is also available, in the files named by the ‘at_stdout’ and ‘at_stderr’ variables. If STATUS is the literal ‘ignore’, then the exit status of COMMANDS is not checked, except for the special cases of 77 (skip) and 99 (hard failure). The existence of hard failures allows one to mark a test as an expected failure with ‘AT_XFAIL_IF’ because a feature has not yet been implemented, but to still distinguish between gracefully handling the missing feature and dumping core. If the value of the STDOUT or STDERR parameter is one of the literals in the following table, then the test treats the output according to the rules of that literal. ‘ignore’ The content of the output is ignored, but still captured in the test group log (if the testsuite is run with the ‘-v’ option, the test group log is displayed as the test is run; if the test group later fails, the test group log is also copied into the overall testsuite log). This action is valid for both STDOUT and STDERR. ‘ignore-nolog’ The content of the output is ignored, and nothing is captured in the log files. If COMMANDS are likely to produce binary output (including long lines) or large amounts of output, then logging the output can make it harder to locate details related to subsequent tests within the group, and could potentially corrupt terminal display of a user running ‘testsuite -v’. This action is valid for both STDOUT and STDERR. ‘stdout’ Only valid as the STDOUT parameter. Capture the content of standard output in both a file named ‘stdout’ and the test group log. Subsequent commands in the test group can then post-process the file. This action is often used when it is desired to use ‘grep’ to look for a substring in the output, or when the output must be post-processed to normalize error messages into a common form. ‘stderr’ Only valid as the STDERR parameter. Capture the content of standard error in both a file named ‘stderr’ and the test group log. 
‘stdout-nolog’ ‘stderr-nolog’ Like ‘stdout’ or ‘stderr’, except that the captured output is not duplicated into the test group log. This action is particularly useful for an intermediate check that produces large amounts of data, which will be followed by another check that filters down to the relevant data, as it makes it easier to locate details in the log. ‘expout’ Only valid as the STDOUT parameter. Compare standard output with the previously created file ‘expout’, and list any differences in the testsuite log. ‘experr’ Only valid as the STDERR parameter. Compare standard error with the previously created file ‘experr’, and list any differences in the testsuite log. Otherwise, the values of the STDOUT and STDERR parameters are treated as text that must exactly match the output given by COMMANDS on standard output and standard error (including an empty parameter for no output); any differences are captured in the testsuite log and the test is failed (unless an unexpected exit status of 77 skipped the test instead). ‘AT_CHECK_UNQUOTED’ performs shell variable expansion (‘$’), command substitution (‘`’), and backslash escaping (‘\’) on comparison text given in the STDOUT and STDERR parameters; ‘AT_CHECK’ does not. There is no difference in the interpretation of COMMANDS. -- Macro: AT_CHECK_EUNIT (MODULE, TEST-SPEC, [ERLFLAGS], [RUN-IF-FAIL], [RUN-IF-PASS]) Initialize and execute an Erlang module named MODULE that performs tests following the TEST-SPEC EUnit test specification. TEST-SPEC must be a valid EUnit test specification, as defined in the EUnit Reference Manual (https://erlang.org/doc/apps/eunit/index.html). ERLFLAGS are optional command-line options passed to the Erlang interpreter to execute the test Erlang module. Typically, ERLFLAGS defines at least the paths to directories containing the compiled Erlang modules under test, as ‘-pa path1 path2 ...’. 
For example, the unit tests associated with Erlang module ‘testme’, whose compiled code is in subdirectory ‘src’, can be performed with:
Useful for running the testsuite in a subdirectory from a top-level Makefile. ‘--jobs[=N]’ ‘-j[N]’ Run N tests in parallel, if possible. If N is not given, run all given tests in parallel. Note that there should be no space before the argument to ‘-j’, as ‘-j NUMBER’ denotes the separate arguments ‘-j’ and ‘NUMBER’, see below. In parallel mode, the standard input device of the testsuite script is not available to commands inside a test group. Furthermore, banner lines are not printed, and the summary line for each test group is output after the test group completes. Summary lines may appear unordered. If verbose and trace output are enabled (see below), they may appear intermixed from concurrently running tests. Parallel mode requires the ‘mkfifo’ command to work, and will be silently disabled otherwise. ‘--clean’ ‘-c’ Remove all the files the test suite might have created and exit. Meant for ‘clean’ Make targets. ‘--list’ ‘-l’ List all the tests (or only the selection), including their possible keywords. By default all tests are performed (or described with ‘--list’) silently in the default environment, but the environment, set of tests, and verbosity level can be tuned: ‘VARIABLE=VALUE’ Set the environment VARIABLE to VALUE. Use this rather than ‘FOO=foo ./testsuite’ as debugging scripts would then run in a different environment. The variable ‘AUTOTEST_PATH’ specifies the testing path to prepend to ‘PATH’. Relative directory names (not starting with ‘/’) are considered to be relative to the top level of the package being built. All directories are made absolute, first starting from the top level _build_ tree, then from the _source_ tree. For instance ‘./testsuite AUTOTEST_PATH=tests:bin’ for a ‘/src/foo-1.0’ source package built in ‘/tmp/foo’ results in ‘/tmp/foo/tests:/tmp/foo/bin’ and then ‘/src/foo-1.0/tests:/src/foo-1.0/bin’ being prepended to ‘PATH’. 
‘NUMBER’ ‘NUMBER-NUMBER’ ‘NUMBER-’ ‘-NUMBER’ Add the corresponding test groups, with obvious semantics, to the selection. ‘--keywords=KEYWORDS’ ‘-k KEYWORDS’ Add to the selection the test groups with title or keywords (arguments to ‘AT_SETUP’ or ‘AT_KEYWORDS’) that match _all_ keywords of the comma separated list KEYWORDS, case-insensitively. Use ‘!’ immediately before the keyword to invert the selection for this keyword. By default, the keywords match whole words; enclose them in ‘.*’ to also match parts of words. For example, running ./testsuite -k 'autoupdate,.*FUNC.*' selects all tests tagged ‘autoupdate’ _and_ with tags containing ‘FUNC’ (as in ‘AC_CHECK_FUNC’, ‘AC_FUNC_ALLOCA’, etc.), while ./testsuite -k '!autoupdate' -k '.*FUNC.*' selects all tests not tagged ‘autoupdate’ _or_ with tags containing ‘FUNC’. ‘--errexit’ ‘-e’ If any test fails, immediately abort testing. This implies ‘--debug’: post test group clean up, and top-level logging are inhibited. This option is meant for the full test suite, it is not really useful for generated debugging scripts. If the testsuite is run in parallel mode using ‘--jobs’, then concurrently running tests will finish before exiting. ‘--verbose’ ‘-v’ Force more verbosity in the detailed output of what is being done. This is the default for debugging scripts. ‘--color’ ‘--color[=never|auto|always]’ Enable colored test results. Without an argument, or with ‘always’, test results will be colored. With ‘never’, color mode is turned off. Otherwise, if either the macro ‘AT_COLOR_TESTS’ is used by the testsuite author, or the argument ‘auto’ is given, then test results are colored if standard output is connected to a terminal. ‘--debug’ ‘-d’ Do not remove the files after a test group was performed—but they are still removed _before_, therefore using this option is sane when running several test groups. Create debugging scripts. Do not overwrite the top-level log (in order to preserve a supposedly existing full log file). 
This is the default for debugging scripts, but it can also be useful to debug the testsuite itself. ‘--recheck’ Add to the selection all test groups that failed or passed unexpectedly during the last non-debugging test run. ‘--trace’ ‘-x’ Trigger shell tracing of the test groups. Besides these options accepted by every Autotest testsuite, the testsuite author might have added package-specific options via the ‘AT_ARG_OPTION’ and ‘AT_ARG_OPTION_ARG’ macros (*note Writing Testsuites::); refer to ‘testsuite --help’ and the package documentation for details.  File: autoconf.info, Node: Making testsuite Scripts, Prev: testsuite Invocation, Up: Using Autotest 19.4 Making ‘testsuite’ Scripts =============================== For putting Autotest into movement, you need some configuration and makefile machinery. We recommend, at least if your package uses deep or shallow hierarchies, that you use ‘tests/’ as the name of the directory holding all your tests and their makefile. Here is a check list of things to do, followed by an example, taking into consideration whether you are also using Automake. − Make sure to create the file ‘package.m4’, which defines the identity of the package. It must define ‘AT_PACKAGE_STRING’, the full signature of the package, and ‘AT_PACKAGE_BUGREPORT’, the address to which bug reports should be sent. For sake of completeness, we suggest that you also define ‘AT_PACKAGE_NAME’, ‘AT_PACKAGE_TARNAME’, ‘AT_PACKAGE_VERSION’, and ‘AT_PACKAGE_URL’. *Note Initializing configure::, for a description of these variables. Be sure to distribute ‘package.m4’ and to put it into the source hierarchy: the test suite ought to be shipped! See below for an example. − Invoke ‘AC_CONFIG_TESTDIR’ in your ‘configure.ac’. -- Macro: AC_CONFIG_TESTDIR (DIRECTORY, [TEST-PATH = DIRECTORY] An Autotest test suite is to be configured in DIRECTORY. 
This macro causes ‘DIRECTORY/atconfig’ to be created by ‘config.status’ and sets the default ‘AUTOTEST_PATH’ to TEST-PATH (*note testsuite Invocation::). − Still within ‘configure.ac’, as appropriate, ensure that some ‘AC_CONFIG_FILES’ command includes substitution for ‘tests/atlocal’. − Also within your ‘configure.ac’, arrange for the ‘AUTOM4TE’ variable to be set. − The appropriate ‘Makefile’ should be modified so the validation in your package is triggered by ‘make check’. The following example demonstrates the above checklist, first by assuming that you are using Automake (see below for tweaks to make to get the same results without Automake). Begin by adding the following lines to your ‘configure.ac’: # Initialize the test suite. AC_CONFIG_TESTDIR([tests]) AC_CONFIG_FILES([tests/Makefile tests/atlocal]) AM_MISSING_PROG([AUTOM4TE], [autom4te]) Next, add the following lines to your ‘tests/Makefile.am’, in order to link ‘make check’ with a validation suite. # The ':;' works around a Bash 3.2 bug when the output is not writable. $(srcdir)/package.m4: $(top_srcdir)/configure.ac :;{ \ echo '# Signature of the current package.' && \ echo 'm4_define([AT_PACKAGE_NAME],' && \ echo ' [$(PACKAGE_NAME)])' && \ echo 'm4_define([AT_PACKAGE_TARNAME],' && \ echo ' [$(PACKAGE_TARNAME)])' && \ echo 'm4_define([AT_PACKAGE_VERSION],' && \ echo ' [$(PACKAGE_VERSION)])' && \ echo 'm4_define([AT_PACKAGE_STRING],' && \ echo ' [$(PACKAGE_STRING)])' && \ echo 'm4_define([AT_PACKAGE_BUGREPORT],' && \ echo ' [$(PACKAGE_BUGREPORT)])'; \ echo 'm4_define([AT_PACKAGE_URL],' && \ echo ' [$(PACKAGE_URL)])'; \ } >'$(srcdir)/package.m4' EXTRA_DIST = testsuite.at $(srcdir)/package.m4 $(TESTSUITE) atlocal.in TESTSUITE = $(srcdir)/testsuite check-local: atconfig atlocal $(TESTSUITE) $(SHELL) '$(TESTSUITE)' $(TESTSUITEFLAGS) installcheck-local: atconfig atlocal $(TESTSUITE) $(SHELL) '$(TESTSUITE)' AUTOTEST_PATH='$(bindir)' \ $(TESTSUITEFLAGS) clean-local: test ! 
-f '$(TESTSUITE)' || \ $(SHELL) '$(TESTSUITE)' --clean AUTOTEST = $(AUTOM4TE) --language=autotest $(TESTSUITE): $(srcdir)/testsuite.at $(srcdir)/package.m4 $(AUTOTEST) -I '$(srcdir)' -o $@.tmp $@.at mv $@.tmp $@ Note that the built testsuite is distributed; this is necessary because users might not have Autoconf installed, and thus would not be able to rebuild it. Likewise, the use of Automake’s ‘AM_MISSING_PROG’ will arrange for the definition of ‘$AUTOM4TE’ within the Makefile to provide the user with a nicer error message if they modify a source file to the testsuite, and accidentally trigger the rebuild rules. You might want to list explicitly the dependencies, i.e., the list of the files ‘testsuite.at’ includes. If you don’t use Automake, you should make the following tweaks. In your ‘configure.ac’, replace the ‘AM_MISSING_PROG’ line above with ‘AC_PATH_PROG([AUTOM4TE], [autom4te], [false])’. You are welcome to also try using the ‘missing’ script from the Automake project instead of ‘false’, to try to get a nicer error message when the user modifies prerequisites but did not have Autoconf installed, but at that point you may be better off using Automake. Then, take the code suggested above for ‘tests/Makefile.am’ and place it in your ‘tests/Makefile.in’ instead. 
Add code to your ‘tests/Makefile.in’ to ensure that ‘$(EXTRA_DIST)’ files are distributed, as well as adding the following additional lines to prepare the set of needed Makefile variables: subdir = tests PACKAGE_NAME = @PACKAGE_NAME@ PACKAGE_TARNAME = @PACKAGE_TARNAME@ PACKAGE_VERSION = @PACKAGE_VERSION@ PACKAGE_STRING = @PACKAGE_STRING@ PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ PACKAGE_URL = @PACKAGE_URL@ AUTOM4TE = @AUTOM4TE@ atconfig: $(top_builddir)/config.status cd $(top_builddir) && \ $(SHELL) ./config.status $(subdir)/$@ atlocal: $(srcdir)/atlocal.in $(top_builddir)/config.status cd $(top_builddir) && \ $(SHELL) ./config.status $(subdir)/$@ Using the above example (with or without Automake), and assuming you were careful to not initialize ‘TESTSUITEFLAGS’ within your makefile, you can now fine-tune test suite execution at runtime by altering this variable, for example: make check TESTSUITEFLAGS='-v -d -x 75 -k AC_PROG_CC CFLAGS=-g'  File: autoconf.info, Node: FAQ, Next: History, Prev: Using Autotest, Up: Top 20 Frequent Autoconf Questions, with answers ******************************************** Several questions about Autoconf come up occasionally. Here some of them are addressed. * Menu: * Distributing:: Distributing ‘configure’ scripts * Why GNU M4:: Why not use the standard M4? * Bootstrapping:: Autoconf and GNU M4 require each other? * Why Not Imake:: Why GNU uses ‘configure’ instead of Imake * Defining Directories:: Passing ‘datadir’ to program * Autom4te Cache:: What is it? Can I remove it? * Present But Cannot Be Compiled:: Compiler and Preprocessor Disagree * Expanded Before Required:: Expanded Before Required * Debugging:: Debugging ‘configure’ scripts  File: autoconf.info, Node: Distributing, Next: Why GNU M4, Up: FAQ 20.1 Distributing ‘configure’ Scripts ===================================== What are the restrictions on distributing ‘configure’ scripts that Autoconf generates? How does that affect my programs that use them? 
There are no restrictions on how the configuration scripts that Autoconf produces may be distributed or used. In Autoconf version 1, they were covered by the GNU General Public License. We still encourage software authors to distribute their work under terms like those of the GPL, but doing so is not required to use Autoconf. Of the other files that might be used with ‘configure’, ‘config.h.in’ is under whatever copyright you use for your ‘configure.ac’. ‘config.sub’ and ‘config.guess’ have an exception to the GPL when they are used with an Autoconf-generated ‘configure’ script, which permits you to distribute them under the same terms as the rest of your package. ‘install-sh’ is from the X Consortium and is not copyrighted.  File: autoconf.info, Node: Why GNU M4, Next: Bootstrapping, Prev: Distributing, Up: FAQ 20.2 Why Require GNU M4? ======================== Why does Autoconf require GNU M4? Many M4 implementations have hard-coded limitations on the size and number of macros that Autoconf exceeds. They also lack several builtin macros that it would be difficult to get along without in a sophisticated application like Autoconf, including: m4_builtin m4_indir m4_bpatsubst __file__ __line__ Autoconf requires version 1.4.6 or later of GNU M4. Since only software maintainers need to use Autoconf, and since GNU M4 is simple to configure and install, it seems reasonable to require GNU M4 to be installed also. Many maintainers of GNU and other free software already have most of the GNU utilities installed, since they prefer them.  File: autoconf.info, Node: Bootstrapping, Next: Why Not Imake, Prev: Why GNU M4, Up: FAQ 20.3 How Can I Bootstrap? ========================= If Autoconf requires GNU M4 and GNU M4 has an Autoconf ‘configure’ script, how do I bootstrap? It seems like a chicken and egg problem! This is a misunderstanding. 
Although GNU M4 does come with a ‘configure’ script produced by Autoconf, Autoconf is not required in order to run the script and install GNU M4. Autoconf is only required if you want to change the M4 ‘configure’ script, which few people have to do (mainly its maintainer).  File: autoconf.info, Node: Why Not Imake, Next: Defining Directories, Prev: Bootstrapping, Up: FAQ 20.4 Why Not Imake? =================== Why not use Imake instead of ‘configure’ scripts? Several people have written addressing this question, so adaptations of their explanations are included here. The following answer is based on one written by Richard Pixley: Autoconf generated scripts frequently work on machines that it has never been set up to handle before. That is, it does a good job of inferring a configuration for a new system. Imake cannot do this. Imake uses a common database of host specific data. For X11, this makes sense because the distribution is made as a collection of tools, by one central authority who has control over the database. GNU tools are not released this way. Each GNU tool has a maintainer; these maintainers are scattered across the world. Using a common database would be a maintenance nightmare. Autoconf may appear to be this kind of database, but in fact it is not. Instead of listing host dependencies, it lists program requirements. If you view the GNU suite as a collection of native tools, then the problems are similar. But the GNU development tools can be configured as cross tools in almost any host+target permutation. All of these configurations can be installed concurrently. They can even be configured to share host independent files across hosts. Imake doesn’t address these issues. Imake templates are a form of standardization. The GNU coding standards address the same issues without necessarily imposing the same restrictions. 
Here is some further explanation, written by Per Bothner: One of the advantages of Imake is that it is easy to generate large makefiles using the ‘#include’ and macro mechanisms of ‘cpp’. However, ‘cpp’ is not programmable: it has limited conditional facilities, and no looping. And ‘cpp’ cannot inspect its environment. All of these problems are solved by using ‘sh’ instead of ‘cpp’. The shell is fully programmable, has macro substitution, can execute (or source) other shell scripts, and can inspect its environment. Paul Eggert elaborates more: With Autoconf, installers need not assume that Imake itself is already installed and working well. This may not seem like much of an advantage to people who are accustomed to Imake. But on many hosts Imake is not installed or the default installation is not working well, and requiring Imake to install a package hinders the acceptance of that package on those hosts. For example, the Imake template and configuration files might not be installed properly on a host, or the Imake build procedure might wrongly assume that all source files are in one big directory tree, or the Imake configuration might assume one compiler whereas the package or the installer needs to use another, or there might be a version mismatch between the Imake expected by the package and the Imake supported by the host. These problems are much rarer with Autoconf, where each package comes with its own independent configuration processor. Also, Imake often suffers from unexpected interactions between ‘make’ and the installer’s C preprocessor. The fundamental problem here is that the C preprocessor was designed to preprocess C programs, not makefiles. This is much less of a problem with Autoconf, which uses the general-purpose preprocessor M4, and where the package’s author (rather than the installer) does the preprocessing in a standard way. Finally, Mark Eichin notes: Imake isn’t all that extensible, either. 
In order to add new features to Imake, you need to provide your own project template, and duplicate most of the features of the existing one. This means that for a sophisticated project, using the vendor-provided Imake templates fails to provide any leverage—since they don’t cover anything that your own project needs (unless it is an X11 program). On the other side, though: The one advantage that Imake has over ‘configure’: ‘Imakefile’ files tend to be much shorter (likewise, less redundant) than ‘Makefile.in’ files. There is a fix to this, however—at least for the Kerberos V5 tree, we’ve modified things to call in common ‘post.in’ and ‘pre.in’ makefile fragments for the entire tree. This means that a lot of common things don’t have to be duplicated, even though they normally are in ‘configure’ setups.  File: autoconf.info, Node: Defining Directories, Next: Autom4te Cache, Prev: Why Not Imake, Up: FAQ 20.5 How Do I ‘#define’ Installation Directories? ================================================= My program needs library files, installed in ‘datadir’ and similar. If I use AC_DEFINE_UNQUOTED([DATADIR], [$datadir], [Define to the read-only architecture-independent data directory.]) I get #define DATADIR "${prefix}/share" As already explained, this behavior is on purpose, mandated by the GNU Coding Standards, see *note Installation Directory Variables::. There are several means to achieve a similar goal: − Do not use ‘AC_DEFINE’ but use your makefile to pass the actual value of ‘datadir’ via compilation flags. *Note Installation Directory Variables::, for the details. 
− This solution can be simplified when compiling a program: you may either extend the ‘CPPFLAGS’: CPPFLAGS = -DDATADIR='"$(datadir)"' @CPPFLAGS@ If you are using Automake, you should use ‘AM_CPPFLAGS’ instead: AM_CPPFLAGS = -DDATADIR='"$(datadir)"' Alternatively, create a dedicated header file: DISTCLEANFILES = myprog-paths.h myprog-paths.h: Makefile echo '#define DATADIR "$(datadir)"' >$@ The Gnulib module ‘configmake’ provides such a header with all the standard directory variables defined, *note (gnulib)configmake::. − Use ‘AC_DEFINE’ but have ‘configure’ compute the literal value of ‘datadir’ and others. Many people have wrapped macros to automate this task; for an example, see the macro ‘AC_DEFINE_DIR’ from the Autoconf Macro Archive (https://www.gnu.org/software/autoconf-archive/). This solution does not conform to the GNU Coding Standards. − Note that all the previous solutions hard wire the absolute name of these directories in the executables, which is not a good property. You may try to compute the names relative to ‘prefix’, and try to find ‘prefix’ at runtime, this way your package is relocatable.  File: autoconf.info, Node: Autom4te Cache, Next: Present But Cannot Be Compiled, Prev: Defining Directories, Up: FAQ 20.6 What is ‘autom4te.cache’? ============================== What is this directory ‘autom4te.cache’? Can I safely remove it? In the GNU Build System, ‘configure.ac’ plays a central role and is read by many tools: ‘autoconf’ to create ‘configure’, ‘autoheader’ to create ‘config.h.in’, ‘automake’ to create ‘Makefile.in’, ‘autoscan’ to check the completeness of ‘configure.ac’, ‘autoreconf’ to check the GNU Build System components that are used. To “read ‘configure.ac’” actually means to compile it with M4, which can be a long process for complex ‘configure.ac’. 
This is why all these tools, instead of running directly M4, invoke ‘autom4te’ (*note autom4te Invocation::) which, while answering to a specific demand, stores additional information in ‘autom4te.cache’ for future runs. For instance, if you run ‘autoconf’, behind the scenes, ‘autom4te’ also stores information for the other tools, so that when you invoke ‘autoheader’ or ‘automake’ etc., reprocessing ‘configure.ac’ is not needed. The speed up is frequently 30%, and is increasing with the size of ‘configure.ac’. But it is and remains being simply a cache: you can safely remove it. Can I permanently get rid of it? The creation of this cache can be disabled from ‘~/.autom4te.cfg’, see *note Customizing autom4te::, for more details. You should be aware that disabling the cache slows down the Autoconf test suite by 40%. The more GNU Build System components are used, the more the cache is useful; for instance running ‘autoreconf -f’ on the Core Utilities is twice slower without the cache _although ‘--force’ implies that the cache is not fully exploited_, and eight times slower than without ‘--force’.  File: autoconf.info, Node: Present But Cannot Be Compiled, Next: Expanded Before Required, Prev: Autom4te Cache, Up: FAQ 20.7 Header Present But Cannot Be Compiled ========================================== The most important guideline to bear in mind when checking for features is to mimic as much as possible the intended use. Unfortunately, old versions of ‘AC_CHECK_HEADER’ and ‘AC_CHECK_HEADERS’ failed to follow this idea, and called the preprocessor, instead of the compiler, to check for headers. As a result, incompatibilities between headers went unnoticed during configuration, and maintainers finally had to deal with this issue elsewhere. The transition began with Autoconf 2.56. As of Autoconf 2.64 both checks are performed, and ‘configure’ complains loudly if the compiler and the preprocessor do not agree. However, only the compiler result is considered. 
As of Autoconf 2.70, only the compiler check is performed. Consider the following example: $ cat number.h typedef int number; $ cat pi.h const number pi = 3; $ cat configure.ac AC_INIT([Example], [1.0], [bug-example@example.org]) AC_CHECK_HEADERS([pi.h]) $ autoconf -Wall $ ./configure CPPFLAGS='-I.' checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking for sys/types.h... yes checking for sys/stat.h... yes checking for strings.h... yes checking for inttypes.h... yes checking for stdint.h... yes checking for unistd.h... yes checking for pi.h... no The proper way to handle this case is using the fourth argument (*note Generic Headers::): $ cat configure.ac AC_INIT([Example], [1.0], [bug-example@example.org]) AC_CHECK_HEADERS([number.h pi.h], [], [], [[#ifdef HAVE_NUMBER_H # include #endif ]]) $ autoconf -Wall $ ./configure CPPFLAGS='-I.' checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether the compiler supports GNU C... yes checking whether gcc accepts -g... yes checking for gcc option to enable C11 features... -std=gnu11 checking for number.h... yes checking for pi.h... yes See *note Particular Headers::, for a list of headers with their prerequisites.  
File: autoconf.info, Node: Expanded Before Required, Next: Debugging, Prev: Present But Cannot Be Compiled, Up: FAQ 20.8 Expanded Before Required ============================= Older versions of Autoconf silently built files with incorrect ordering between dependent macros if an outer macro first expanded, then later indirectly required, an inner macro. Starting with Autoconf 2.64, this situation no longer generates out-of-order code, but results in duplicate output and a syntax warning: $ cat configure.ac ⇒AC_DEFUN([TESTA], [[echo in A ⇒if test -n "$SEEN_A" ; then echo duplicate ; fi ⇒SEEN_A=:]]) ⇒AC_DEFUN([TESTB], [AC_REQUIRE([TESTA])[echo in B ⇒if test -z "$SEEN_A" ; then echo bug ; fi]]) ⇒AC_DEFUN([TESTC], [AC_REQUIRE([TESTB])[echo in C]]) ⇒AC_DEFUN([OUTER], [[echo in OUTER] ⇒TESTA ⇒TESTC]) ⇒AC_INIT ⇒OUTER ⇒AC_OUTPUT $ autoconf ⇒configure.ac:11: warning: AC_REQUIRE: ⇒ `TESTA' was expanded before it was required ⇒configure.ac:4: TESTB is expanded from... ⇒configure.ac:6: TESTC is expanded from... ⇒configure.ac:7: OUTER is expanded from... ⇒configure.ac:11: the top level To avoid this warning, decide what purpose the macro in question serves. If it only needs to be expanded once (for example, if it provides initialization text used by later macros), then the simplest fix is to change the macro to be declared with ‘AC_DEFUN_ONCE’ (*note One-Shot Macros::), although this only works in Autoconf 2.64 and newer. A more portable fix is to change all instances of direct calls to instead go through ‘AC_REQUIRE’ (*note Prerequisite Macros::). If, instead, the macro is parameterized by arguments or by the current definition of other macros in the m4 environment, then the macro should always be directly expanded instead of required. For another case study, consider this example trimmed down from an actual package. 
Originally, the package contained shell code and multiple macro invocations at the top level of ‘configure.ac’: AC_DEFUN([FOO], [AC_COMPILE_IFELSE([...])]) foobar= AC_PROG_CC FOO but that was getting complex, so the author wanted to offload some of the text into a new macro in another file included via ‘aclocal.m4’. The naïve approach merely wraps the text in a new macro: AC_DEFUN([FOO], [AC_COMPILE_IFELSE([...])]) AC_DEFUN([BAR], [ foobar= AC_PROG_CC FOO ]) BAR With older versions of Autoconf, the setting of ‘foobar=’ occurs before the single compiler check, as the author intended. But with Autoconf 2.64, this issues the “expanded before it was required” warning for ‘AC_PROG_CC’, and outputs two copies of the compiler check, one before ‘foobar=’, and one after. To understand why this is happening, remember that the use of ‘AC_COMPILE_IFELSE’ includes a call to ‘AC_REQUIRE([AC_PROG_CC])’ under the hood. According to the documented semantics of ‘AC_REQUIRE’, this means that ‘AC_PROG_CC’ _must_ occur before the body of the outermost ‘AC_DEFUN’, which in this case is ‘BAR’, thus preceding the use of ‘foobar=’. The older versions of Autoconf were broken with regards to the rules of ‘AC_REQUIRE’, which explains why the code changed from one over to two copies of ‘AC_PROG_CC’ when upgrading autoconf. In other words, the author was unknowingly relying on a bug exploit to get the desired results, and that exploit broke once the bug was fixed. So, what recourse does the author have, to restore their intended semantics of setting ‘foobar=’ prior to a single compiler check, regardless of whether Autoconf 2.63 or 2.64 is used? One idea is to remember that only ‘AC_DEFUN’ is impacted by ‘AC_REQUIRE’; there is always the possibility of using the lower-level ‘m4_define’: AC_DEFUN([FOO], [AC_COMPILE_IFELSE([...])]) m4_define([BAR], [ foobar= AC_PROG_CC FOO ]) BAR This works great if everything is in the same file. 
However, it does not help in the case where the author wants to have ‘aclocal’ find the definition of ‘BAR’ from its own file, since ‘aclocal’ requires the use of ‘AC_DEFUN’. In this case, a better fix is to recognize that if ‘BAR’ also uses ‘AC_REQUIRE’, then there will no longer be direct expansion prior to a subsequent require. Then, by creating yet another helper macro, the author can once again guarantee a single invocation of ‘AC_PROG_CC’, which will still occur after ‘foobar=’. The author can also use ‘AC_BEFORE’ to make sure no other macro appearing before ‘BAR’ has triggered an unwanted expansion of ‘AC_PROG_CC’. AC_DEFUN([FOO], [AC_COMPILE_IFELSE([...])]) AC_DEFUN([BEFORE_CC], [ foobar= ]) AC_DEFUN([BAR], [ AC_BEFORE([$0], [AC_PROG_CC])dnl AC_REQUIRE([BEFORE_CC])dnl AC_REQUIRE([AC_PROG_CC])dnl FOO ]) BAR  File: autoconf.info, Node: Debugging, Prev: Expanded Before Required, Up: FAQ 20.9 Debugging ‘configure’ scripts ================================== While in general, ‘configure’ scripts generated by Autoconf strive to be fairly portable to various systems, compilers, shells, and other tools, it may still be necessary to debug a failing test, broken script or makefile, or fix or override an incomplete, faulty, or erroneous test, especially during macro development. Failures can occur at all levels, in M4 syntax or semantics, shell script issues, or due to bugs in the test or the tools invoked by ‘configure’. Together with the rather arcane error message that ‘m4’ and ‘make’ may produce when their input contains syntax errors, this can make debugging rather painful. 
Nevertheless, here is a list of hints and strategies that may help: • When ‘autoconf’ fails, common causes for error include: • mismatched or unbalanced parentheses or braces (*note Balancing Parentheses::), • under- or over-quoted macro arguments (*note Autoconf Language::, *note Quoting and Parameters::, *note Quotation and Nested Macros::), • spaces between macro name and opening parenthesis (*note Autoconf Language::). Typically, it helps to go back to the last working version of the input and compare the differences for each of these errors. Another possibility is to sprinkle pairs of ‘m4_traceon’ and ‘m4_traceoff’ judiciously in the code, either without a parameter or listing some macro names and watch ‘m4’ expand its input verbosely (*note Debugging via autom4te::). • Sometimes ‘autoconf’ succeeds but the generated ‘configure’ script has invalid shell syntax. You can detect this case by running ‘bash -n configure’ or ‘sh -n configure’. If this command fails, the same tips apply, as if ‘autoconf’ had failed. • Debugging ‘configure’ script execution may be done by sprinkling pairs of ‘set -x’ and ‘set +x’ into the shell script before and after the region that contains a bug. Running the whole script with ‘SHELL -vx ./configure 2>&1 | tee LOG-FILE’ with a decent SHELL may work, but produces lots of output. Here, it can help to search for markers like ‘checking for’ a particular test in the LOG-FILE. • Alternatively, you might use a shell with debugging capabilities like bashdb (http://bashdb.sourceforge.net/). • When ‘configure’ tests produce invalid results for your system, it may be necessary to override them: • For programs, tools or libraries variables, preprocessor, compiler, or linker flags, it is often sufficient to override them at ‘make’ run time with some care (*note Macros and Submakes::). 
Since this normally won’t cause ‘configure’ to be run again with these changed settings, it may fail if the changed variable would have caused different test results from ‘configure’, so this may work only for simple differences. • Most tests which produce their result in a substituted variable allow to override the test by setting the variable on the ‘configure’ command line (*note Compilers and Options::, *note Defining Variables::, *note Particular Systems::). • Many tests store their result in a cache variable (*note Caching Results::). This lets you override them either on the ‘configure’ command line as above, or through a primed cache or site file (*note Cache Files::, *note Site Defaults::). The name of a cache variable is documented with a test macro or may be inferred from *note Cache Variable Names::; the precise semantics of undocumented variables are often internal details, subject to change. • Alternatively, ‘configure’ may produce invalid results because of uncaught programming errors, in your package or in an upstream library package. For example, when ‘AC_CHECK_LIB’ fails to find a library with a specified function, always check ‘config.log’. This will reveal the exact error that produced the failing result: the library linked by ‘AC_CHECK_LIB’ probably has a fatal bug. Conversely, as macro author, you can make it easier for users of your macro: • by minimizing dependencies between tests and between test results as far as possible, • by using ‘make’ variables to factorize and allow override of settings at ‘make’ run time, • by honoring the GNU Coding Standards and not overriding flags reserved for the user except temporarily during ‘configure’ tests, • by not requiring users of your macro to use the cache variables. Instead, expose the result of the test via RUN-IF-TRUE and RUN-IF-FALSE parameters. If the result is not a boolean, then provide it through documented shell variables.  
File: autoconf.info, Node: History, Next: GNU Free Documentation License, Prev: FAQ, Up: Top 21 History of Autoconf ********************** _This chapter was written by the original author, David MacKenzie._ You may be wondering, Why was Autoconf originally written? How did it get into its present form? (Why does it look like gorilla spit?) If you’re not wondering, then this chapter contains no information useful to you, and you might as well skip it. If you _are_ wondering, then let there be light... * Menu: * Genesis:: Prehistory and naming of ‘configure’ * Exodus:: The plagues of M4 and Perl * Leviticus:: The priestly code of portability arrives * Numbers:: Growth and contributors * Deuteronomy:: Approaching the promises of easy configuration  File: autoconf.info, Node: Genesis, Next: Exodus, Up: History 21.1 Genesis ============ In June 1991 I was maintaining many of the GNU utilities for the Free Software Foundation. As they were ported to more platforms and more programs were added, the number of ‘-D’ options that users had to select in the makefile (around 20) became burdensome. Especially for me—I had to test each new release on a bunch of different systems. So I wrote a little shell script to guess some of the correct settings for the fileutils package, and released it as part of fileutils 2.0. That ‘configure’ script worked well enough that the next month I adapted it (by hand) to create similar ‘configure’ scripts for several other GNU utilities packages. Brian Berliner also adapted one of my scripts for his CVS revision control system. Later that summer, I learned that Richard Stallman and Richard Pixley were developing similar scripts to use in the GNU compiler tools; so I adapted my ‘configure’ scripts to support their evolving interface: using the file name ‘Makefile.in’ as the templates; adding ‘+srcdir’, the first option (of many); and creating ‘config.status’ files.  
File: autoconf.info, Node: Exodus, Next: Leviticus, Prev: Genesis, Up: History 21.2 Exodus =========== As I got feedback from users, I incorporated many improvements, using Emacs to search and replace, cut and paste, similar changes in each of the scripts. As I adapted more GNU utilities packages to use ‘configure’ scripts, updating them all by hand became impractical. Rich Murphey, the maintainer of the GNU graphics utilities, sent me mail saying that the ‘configure’ scripts were great, and asking if I had a tool for generating them that I could send him. No, I thought, but I should! So I started to work out how to generate them. And the journey from the slavery of hand-written ‘configure’ scripts to the abundance and ease of Autoconf began. Cygnus ‘configure’, which was being developed at around that time, is table driven; it is meant to deal mainly with a discrete number of system types with a small number of mainly unguessable features (such as details of the object file format). The automatic configuration system that Brian Fox had developed for Bash takes a similar approach. For general use, it seems to me a hopeless cause to try to maintain an up-to-date database of which features each variant of each operating system has. It’s easier and more reliable to check for most features on the fly—especially on hybrid systems that people have hacked on locally or that have patches from vendors installed. I considered using an architecture similar to that of Cygnus ‘configure’, where there is a single ‘configure’ script that reads pieces of ‘configure.in’ when run. But I didn’t want to have to distribute all of the feature tests with every package, so I settled on having a different ‘configure’ made from each ‘configure.in’ by a preprocessor. That approach also offered more control and flexibility. I looked briefly into using the Metaconfig package, by Larry Wall, Harlan Stenn, and Raphael Manfredi, but I decided not to for several reasons. 
The ‘Configure’ scripts it produces are interactive, which I find quite inconvenient; I didn’t like the ways it checked for some features (such as library functions); I didn’t know that it was still being maintained, and the ‘Configure’ scripts I had seen didn’t work on many modern systems (such as System V R4 and NeXT); it wasn’t flexible in what it could do in response to a feature’s presence or absence; I found it confusing to learn; and it was too big and complex for my needs (I didn’t realize then how much Autoconf would eventually have to grow). I considered using Perl to generate my style of ‘configure’ scripts, but decided that M4 was better suited to the job of simple textual substitutions: it gets in the way less, because output is implicit. Plus, everyone already has it. (Initially I didn’t rely on the GNU extensions to M4.) Also, some of my friends at the University of Maryland had recently been putting M4 front ends on several programs, including ‘tvtwm’, and I was interested in trying out a new language.  File: autoconf.info, Node: Leviticus, Next: Numbers, Prev: Exodus, Up: History 21.3 Leviticus ============== Since my ‘configure’ scripts determine the system’s capabilities automatically, with no interactive user intervention, I decided to call the program that generates them Autoconfig. But with a version number tacked on, that name would be too long for old Unix file systems, so I shortened it to Autoconf. In the fall of 1991 I called together a group of fellow questers after the Holy Grail of portability (er, that is, alpha testers) to give me feedback as I encapsulated pieces of my handwritten scripts in M4 macros and continued to add features and improve the techniques used in the checks. 
Prominent among the testers were François Pinard, who came up with the idea of making an Autoconf shell script to run M4 and check for unresolved macro calls; Richard Pixley, who suggested running the compiler instead of searching the file system to find include files and symbols, for more accurate results; Karl Berry, who got Autoconf to configure TeX and added the macro index to the documentation; and Ian Lance Taylor, who added support for creating a C header file as an alternative to putting ‘-D’ options in a makefile, so he could use Autoconf for his UUCP package. The alpha testers cheerfully adjusted their files again and again as the names and calling conventions of the Autoconf macros changed from release to release. They all contributed many specific checks, great ideas, and bug fixes.  File: autoconf.info, Node: Numbers, Next: Deuteronomy, Prev: Leviticus, Up: History 21.4 Numbers ============ In July 1992, after months of alpha testing, I released Autoconf 1.0, and converted many GNU packages to use it. I was surprised by how positive the reaction to it was. More people started using it than I could keep track of, including people working on software that wasn’t part of the GNU Project (such as TCL, FSP, and Kerberos V5). Autoconf continued to improve rapidly, as many people using the ‘configure’ scripts reported problems they encountered. Autoconf turned out to be a good torture test for M4 implementations. Unix M4 started to dump core because of the length of the macros that Autoconf defined, and several bugs showed up in GNU M4 as well. Eventually, we realized that we needed to use some features that only GNU M4 has. 4.3BSD M4, in particular, has an impoverished set of builtin macros; the System V version is better, but still doesn’t provide everything we need. More development occurred as people put Autoconf under more stresses (and to uses I hadn’t anticipated). Karl Berry added checks for X11. david zuhn contributed C++ support. 
François Pinard made it diagnose invalid arguments. Jim Blandy bravely coerced it into configuring GNU Emacs, laying the groundwork for several later improvements. Roland McGrath got it to configure the GNU C Library, wrote the ‘autoheader’ script to automate the creation of C header file templates, and added a ‘--verbose’ option to ‘configure’. Noah Friedman added the ‘--autoconf-dir’ option and ‘AC_MACRODIR’ environment variable. (He also coined the term “autoconfiscate” to mean “adapt a software package to use Autoconf”.) Roland and Noah improved the quoting protection in ‘AC_DEFINE’ and fixed many bugs, especially when I got sick of dealing with portability problems from February through June, 1993.  File: autoconf.info, Node: Deuteronomy, Prev: Numbers, Up: History 21.5 Deuteronomy ================ A long wish list for major features had accumulated, and the effect of several years of patching by various people had left some residual cruft. In April 1994, while working for Cygnus Support, I began a major revision of Autoconf. I added most of the features of the Cygnus ‘configure’ that Autoconf had lacked, largely by adapting the relevant parts of Cygnus ‘configure’ with the help of david zuhn and Ken Raeburn. These features include support for using ‘config.sub’, ‘config.guess’, ‘--host’, and ‘--target’; making links to files; and running ‘configure’ scripts in subdirectories. Adding these features enabled Ken to convert GNU ‘as’, and Rob Savoye to convert DejaGNU, to using Autoconf. I added more features in response to other peoples’ requests. Many people had asked for ‘configure’ scripts to share the results of the checks between runs, because (particularly when configuring a large source tree, like Cygnus does) they were frustratingly slow. Mike Haertel suggested adding site-specific initialization scripts. 
People distributing software that had to unpack on MS-DOS asked for a way to override the ‘.in’ extension on the file names, which produced file names like ‘config.h.in’ containing two dots. Jim Avera did an extensive examination of the problems with quoting in ‘AC_DEFINE’ and ‘AC_SUBST’; his insights led to significant improvements. Richard Stallman asked that compiler output be sent to ‘config.log’ instead of ‘/dev/null’, to help people debug the Emacs ‘configure’ script. I made some other changes because of my dissatisfaction with the quality of the program. I made the messages showing results of the checks less ambiguous, always printing a result. I regularized the names of the macros and cleaned up coding style inconsistencies. I added some auxiliary utilities that I had developed to help convert source code packages to use Autoconf. With the help of François Pinard, I made the macros not interrupt each others’ messages. (That feature revealed some performance bottlenecks in GNU M4, which he hastily corrected!) I reorganized the documentation around problems people want to solve. And I began a test suite, because experience had shown that Autoconf has a pronounced tendency to regress when we change it. Again, several alpha testers gave invaluable feedback, especially François Pinard, Jim Meyering, Karl Berry, Rob Savoye, Ken Raeburn, and Mark Eichin. Finally, version 2.0 was ready. And there was much rejoicing. (And I have free time again. I think. Yeah, right.)  File: autoconf.info, Node: GNU Free Documentation License, Next: Indices, Prev: History, Up: Top Appendix A GNU Free Documentation License ***************************************** Version 1.3, 3 November 2008 Copyright © 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 0. 
PREAMBLE The purpose of this License is to make a manual, textbook, or other functional and useful document “free” in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others. This License is a kind of “copyleft”, which means that derivative works of the document must themselves be free in the same sense. It complements the GNU General Public License, which is a copyleft license designed for free software. We have designed this License in order to use it for manuals for free software, because free software needs free documentation: a free program should come with manuals providing the same freedoms that the software does. But this License is not limited to software manuals; it can be used for any textual work, regardless of subject matter or whether it is published as a printed book. We recommend this License principally for works whose purpose is instruction or reference. 1. APPLICABILITY AND DEFINITIONS This License applies to any manual or other work, in any medium, that contains a notice placed by the copyright holder saying it can be distributed under the terms of this License. Such a notice grants a world-wide, royalty-free license, unlimited in duration, to use that work under the conditions stated herein. The “Document”, below, refers to any such manual or work. Any member of the public is a licensee, and is addressed as “you”. You accept the license if you copy, modify or distribute the work in a way requiring permission under copyright law. A “Modified Version” of the Document means any work containing the Document or a portion of it, either copied verbatim, or with modifications and/or translated into another language. 
A “Secondary Section” is a named appendix or a front-matter section of the Document that deals exclusively with the relationship of the publishers or authors of the Document to the Document’s overall subject (or to related matters) and contains nothing that could fall directly within that overall subject. (Thus, if the Document is in part a textbook of mathematics, a Secondary Section may not explain any mathematics.) The relationship could be a matter of historical connection with the subject or with related matters, or of legal, commercial, philosophical, ethical or political position regarding them. The “Invariant Sections” are certain Secondary Sections whose titles are designated, as being those of Invariant Sections, in the notice that says that the Document is released under this License. If a section does not fit the above definition of Secondary then it is not allowed to be designated as Invariant. The Document may contain zero Invariant Sections. If the Document does not identify any Invariant Sections then there are none. The “Cover Texts” are certain short passages of text that are listed, as Front-Cover Texts or Back-Cover Texts, in the notice that says that the Document is released under this License. A Front-Cover Text may be at most 5 words, and a Back-Cover Text may be at most 25 words. A “Transparent” copy of the Document means a machine-readable copy, represented in a format whose specification is available to the general public, that is suitable for revising the document straightforwardly with generic text editors or (for images composed of pixels) generic paint programs or (for drawings) some widely available drawing editor, and that is suitable for input to text formatters or for automatic translation to a variety of formats suitable for input to text formatters. 
A copy made in an otherwise Transparent file format whose markup, or absence of markup, has been arranged to thwart or discourage subsequent modification by readers is not Transparent. An image format is not Transparent if used for any substantial amount of text. A copy that is not “Transparent” is called “Opaque”. Examples of suitable formats for Transparent copies include plain ASCII without markup, Texinfo input format, LaTeX input format, SGML or XML using a publicly available DTD, and standard-conforming simple HTML, PostScript or PDF designed for human modification. Examples of transparent image formats include PNG, XCF and JPG. Opaque formats include proprietary formats that can be read and edited only by proprietary word processors, SGML or XML for which the DTD and/or processing tools are not generally available, and the machine-generated HTML, PostScript or PDF produced by some word processors for output purposes only. The “Title Page” means, for a printed book, the title page itself, plus such following pages as are needed to hold, legibly, the material this License requires to appear in the title page. For works in formats which do not have any title page as such, “Title Page” means the text near the most prominent appearance of the work’s title, preceding the beginning of the body of the text. The “publisher” means any person or entity that distributes copies of the Document to the public. A section “Entitled XYZ” means a named subunit of the Document whose title either is precisely XYZ or contains XYZ in parentheses following text that translates XYZ in another language. (Here XYZ stands for a specific section name mentioned below, such as “Acknowledgements”, “Dedications”, “Endorsements”, or “History”.) To “Preserve the Title” of such a section when you modify the Document means that it remains a section “Entitled XYZ” according to this definition. 
The Document may include Warranty Disclaimers next to the notice which states that this License applies to the Document. These Warranty Disclaimers are considered to be included by reference in this License, but only as regards disclaiming warranties: any other implication that these Warranty Disclaimers may have is void and has no effect on the meaning of this License. 2. VERBATIM COPYING You may copy and distribute the Document in any medium, either commercially or noncommercially, provided that this License, the copyright notices, and the license notice saying this License applies to the Document are reproduced in all copies, and that you add no other conditions whatsoever to those of this License. You may not use technical measures to obstruct or control the reading or further copying of the copies you make or distribute. However, you may accept compensation in exchange for copies. If you distribute a large enough number of copies you must also follow the conditions in section 3. You may also lend copies, under the same conditions stated above, and you may publicly display copies. 3. COPYING IN QUANTITY If you publish printed copies (or copies in media that commonly have printed covers) of the Document, numbering more than 100, and the Document’s license notice requires Cover Texts, you must enclose the copies in covers that carry, clearly and legibly, all these Cover Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on the back cover. Both covers must also clearly and legibly identify you as the publisher of these copies. The front cover must present the full title with all words of the title equally prominent and visible. You may add other material on the covers in addition. Copying with changes limited to the covers, as long as they preserve the title of the Document and satisfy these conditions, can be treated as verbatim copying in other respects. 
If the required texts for either cover are too voluminous to fit legibly, you should put the first ones listed (as many as fit reasonably) on the actual cover, and continue the rest onto adjacent pages. If you publish or distribute Opaque copies of the Document numbering more than 100, you must either include a machine-readable Transparent copy along with each Opaque copy, or state in or with each Opaque copy a computer-network location from which the general network-using public has access to download using public-standard network protocols a complete Transparent copy of the Document, free of added material. If you use the latter option, you must take reasonably prudent steps, when you begin distribution of Opaque copies in quantity, to ensure that this Transparent copy will remain thus accessible at the stated location until at least one year after the last time you distribute an Opaque copy (directly or through your agents or retailers) of that edition to the public. It is requested, but not required, that you contact the authors of the Document well before redistributing any large number of copies, to give them a chance to provide you with an updated version of the Document. 4. MODIFICATIONS You may copy and distribute a Modified Version of the Document under the conditions of sections 2 and 3 above, provided that you release the Modified Version under precisely this License, with the Modified Version filling the role of the Document, thus licensing distribution and modification of the Modified Version to whoever possesses a copy of it. In addition, you must do these things in the Modified Version: A. Use in the Title Page (and on the covers, if any) a title distinct from that of the Document, and from those of previous versions (which should, if there were any, be listed in the History section of the Document). You may use the same title as a previous version if the original publisher of that version gives permission. B. 
List on the Title Page, as authors, one or more persons or entities responsible for authorship of the modifications in the Modified Version, together with at least five of the principal authors of the Document (all of its principal authors, if it has fewer than five), unless they release you from this requirement. C. State on the Title page the name of the publisher of the Modified Version, as the publisher. D. Preserve all the copyright notices of the Document. E. Add an appropriate copyright notice for your modifications adjacent to the other copyright notices. F. Include, immediately after the copyright notices, a license notice giving the public permission to use the Modified Version under the terms of this License, in the form shown in the Addendum below. G. Preserve in that license notice the full lists of Invariant Sections and required Cover Texts given in the Document’s license notice. H. Include an unaltered copy of this License. I. Preserve the section Entitled “History”, Preserve its Title, and add to it an item stating at least the title, year, new authors, and publisher of the Modified Version as given on the Title Page. If there is no section Entitled “History” in the Document, create one stating the title, year, authors, and publisher of the Document as given on its Title Page, then add an item describing the Modified Version as stated in the previous sentence. J. Preserve the network location, if any, given in the Document for public access to a Transparent copy of the Document, and likewise the network locations given in the Document for previous versions it was based on. These may be placed in the “History” section. You may omit a network location for a work that was published at least four years before the Document itself, or if the original publisher of the version it refers to gives permission. K. 
For any section Entitled “Acknowledgements” or “Dedications”, Preserve the Title of the section, and preserve in the section all the substance and tone of each of the contributor acknowledgements and/or dedications given therein. L. Preserve all the Invariant Sections of the Document, unaltered in their text and in their titles. Section numbers or the equivalent are not considered part of the section titles. M. Delete any section Entitled “Endorsements”. Such a section may not be included in the Modified Version. N. Do not retitle any existing section to be Entitled “Endorsements” or to conflict in title with any Invariant Section. O. Preserve any Warranty Disclaimers. If the Modified Version includes new front-matter sections or appendices that qualify as Secondary Sections and contain no material copied from the Document, you may at your option designate some or all of these sections as invariant. To do this, add their titles to the list of Invariant Sections in the Modified Version’s license notice. These titles must be distinct from any other section titles. You may add a section Entitled “Endorsements”, provided it contains nothing but endorsements of your Modified Version by various parties—for example, statements of peer review or that the text has been approved by an organization as the authoritative definition of a standard. You may add a passage of up to five words as a Front-Cover Text, and a passage of up to 25 words as a Back-Cover Text, to the end of the list of Cover Texts in the Modified Version. Only one passage of Front-Cover Text and one of Back-Cover Text may be added by (or through arrangements made by) any one entity. If the Document already includes a cover text for the same cover, previously added by you or by arrangement made by the same entity you are acting on behalf of, you may not add another; but you may replace the old one, on explicit permission from the previous publisher that added the old one. 
The author(s) and publisher(s) of the Document do not by this License give permission to use their names for publicity for or to assert or imply endorsement of any Modified Version. 5. COMBINING DOCUMENTS You may combine the Document with other documents released under this License, under the terms defined in section 4 above for modified versions, provided that you include in the combination all of the Invariant Sections of all of the original documents, unmodified, and list them all as Invariant Sections of your combined work in its license notice, and that you preserve all their Warranty Disclaimers. The combined work need only contain one copy of this License, and multiple identical Invariant Sections may be replaced with a single copy. If there are multiple Invariant Sections with the same name but different contents, make the title of each such section unique by adding at the end of it, in parentheses, the name of the original author or publisher of that section if known, or else a unique number. Make the same adjustment to the section titles in the list of Invariant Sections in the license notice of the combined work. In the combination, you must combine any sections Entitled “History” in the various original documents, forming one section Entitled “History”; likewise combine any sections Entitled “Acknowledgements”, and any sections Entitled “Dedications”. You must delete all sections Entitled “Endorsements.” 6. COLLECTIONS OF DOCUMENTS You may make a collection consisting of the Document and other documents released under this License, and replace the individual copies of this License in the various documents with a single copy that is included in the collection, provided that you follow the rules of this License for verbatim copying of each of the documents in all other respects. 
You may extract a single document from such a collection, and distribute it individually under this License, provided you insert a copy of this License into the extracted document, and follow this License in all other respects regarding verbatim copying of that document. 7. AGGREGATION WITH INDEPENDENT WORKS A compilation of the Document or its derivatives with other separate and independent documents or works, in or on a volume of a storage or distribution medium, is called an “aggregate” if the copyright resulting from the compilation is not used to limit the legal rights of the compilation’s users beyond what the individual works permit. When the Document is included in an aggregate, this License does not apply to the other works in the aggregate which are not themselves derivative works of the Document. If the Cover Text requirement of section 3 is applicable to these copies of the Document, then if the Document is less than one half of the entire aggregate, the Document’s Cover Texts may be placed on covers that bracket the Document within the aggregate, or the electronic equivalent of covers if the Document is in electronic form. Otherwise they must appear on printed covers that bracket the whole aggregate. 8. TRANSLATION Translation is considered a kind of modification, so you may distribute translations of the Document under the terms of section 4. Replacing Invariant Sections with translations requires special permission from their copyright holders, but you may include translations of some or all Invariant Sections in addition to the original versions of these Invariant Sections. You may include a translation of this License, and all the license notices in the Document, and any Warranty Disclaimers, provided that you also include the original English version of this License and the original versions of those notices and disclaimers. 
In case of a disagreement between the translation and the original version of this License or a notice or disclaimer, the original version will prevail. If a section in the Document is Entitled “Acknowledgements”, “Dedications”, or “History”, the requirement (section 4) to Preserve its Title (section 1) will typically require changing the actual title. 9. TERMINATION You may not copy, modify, sublicense, or distribute the Document except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, or distribute it is void, and will automatically terminate your rights under this License. However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, receipt of a copy of some or all of the same material does not give you any rights to use it. 10. FUTURE REVISIONS OF THIS LICENSE The Free Software Foundation may publish new, revised versions of the GNU Free Documentation License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. See . 
Each version of the License is given a distinguishing version number. If the Document specifies that a particular numbered version of this License “or any later version” applies to it, you have the option of following the terms and conditions either of that specified version or of any later version that has been published (not as a draft) by the Free Software Foundation. If the Document does not specify a version number of this License, you may choose any version ever published (not as a draft) by the Free Software Foundation. If the Document specifies that a proxy can decide which future versions of this License can be used, that proxy’s public statement of acceptance of a version permanently authorizes you to choose that version for the Document. 11. RELICENSING “Massive Multiauthor Collaboration Site” (or “MMC Site”) means any World Wide Web server that publishes copyrightable works and also provides prominent facilities for anybody to edit those works. A public wiki that anybody can edit is an example of such a server. A “Massive Multiauthor Collaboration” (or “MMC”) contained in the site means any set of copyrightable works thus published on the MMC site. “CC-BY-SA” means the Creative Commons Attribution-Share Alike 3.0 license published by Creative Commons Corporation, a not-for-profit corporation with a principal place of business in San Francisco, California, as well as future copyleft versions of that license published by that same organization. “Incorporate” means to publish or republish a Document, in whole or in part, as part of another Document. An MMC is “eligible for relicensing” if it is licensed under this License, and if all works that were first published under this License somewhere other than this MMC, and subsequently incorporated in whole or in part into the MMC, (1) had no cover texts or invariant sections, and (2) were thus incorporated prior to November 1, 2008. 
The operator of an MMC Site may republish an MMC contained in the site under CC-BY-SA on the same site at any time before August 1, 2009, provided the MMC is eligible for relicensing. ADDENDUM: How to use this License for your documents ==================================================== To use this License in a document you have written, include a copy of the License in the document and put the following copyright and license notices just after the title page: Copyright (C) YEAR YOUR NAME. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License''. If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, replace the “with...Texts.” line with this: with the Invariant Sections being LIST THEIR TITLES, with the Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. If you have Invariant Sections without Cover Texts, or some other combination of the three, merge those two alternatives to suit the situation. If your document contains nontrivial examples of program code, we recommend releasing these examples in parallel under your choice of free software license, such as the GNU General Public License, to permit their use in free software.  
File: autoconf.info, Node: Indices, Prev: GNU Free Documentation License, Up: Top Appendix B Indices ****************** * Menu: * Environment Variable Index:: Index of environment variables used * Output Variable Index:: Index of variables set in output files * Preprocessor Symbol Index:: Index of C preprocessor symbols defined * Cache Variable Index:: Index of documented cache variables * Autoconf Macro Index:: Index of Autoconf macros * M4 Macro Index:: Index of M4, M4sugar, and M4sh macros * Autotest Macro Index:: Index of Autotest macros * Program & Function Index:: Index of those with portability problems * Concept Index:: General index  File: autoconf.info, Node: Environment Variable Index, Next: Output Variable Index, Up: Indices B.1 Environment Variable Index ============================== This is an alphabetical list of the environment variables that might influence Autoconf checks. [index] * Menu: * _: Special Shell Variables. (line 36) * BIN_SH: Special Shell Variables. (line 40) * CC: C Compiler. (line 61) * CDPATH: Special Shell Variables. (line 44) * CFLAGS: Preset Output Variables. (line 23) * CFLAGS <1>: C Compiler. (line 61) * CLICOLOR_FORCE: Special Shell Variables. (line 67) * CONFIG_COMMANDS: Obsolete config.status Use. (line 11) * CONFIG_FILES: Obsolete config.status Use. (line 15) * CONFIG_HEADERS: Obsolete config.status Use. (line 20) * CONFIG_LINKS: Obsolete config.status Use. (line 25) * CONFIG_SHELL: config.status Invocation. (line 101) * CONFIG_SITE: Site Defaults. (line 10) * CONFIG_STATUS: config.status Invocation. (line 107) * CPP: C Compiler. (line 138) * CPPFLAGS: Preset Output Variables. (line 75) * CXX: C++ Compiler. (line 7) * CXXCPP: C++ Compiler. (line 66) * CXXFLAGS: Preset Output Variables. (line 97) * CXXFLAGS <1>: C++ Compiler. (line 7) * CYGWIN: Obsolete Macros. (line 124) * DUALCASE: Special Shell Variables. (line 74) * ENV: Special Shell Variables. (line 84) * ERL: Erlang Compiler and Interpreter. 
(line 29) * ERLC: Erlang Compiler and Interpreter. (line 10) * ERLCFLAGS: Preset Output Variables. (line 123) * ERLCFLAGS <1>: Erlang Compiler and Interpreter. (line 10) * F77: Fortran Compiler. (line 19) * FC: Fortran Compiler. (line 44) * FCFLAGS: Preset Output Variables. (line 129) * FCFLAGS <1>: Fortran Compiler. (line 44) * FFLAGS: Preset Output Variables. (line 136) * FFLAGS <1>: Fortran Compiler. (line 19) * FPATH: Special Shell Variables. (line 100) * GOFLAGS: Preset Output Variables. (line 173) * GREP_OPTIONS: Special Shell Variables. (line 107) * IFS: Special Shell Variables. (line 115) * LANG: Special Shell Variables. (line 159) * LANGUAGE: Special Shell Variables. (line 167) * LC_ADDRESS: Special Shell Variables. (line 178) * LC_ALL: Initialization Macros. (line 14) * LC_ALL <1>: Special Shell Variables. (line 159) * LC_COLLATE: Special Shell Variables. (line 159) * LC_CTYPE: Special Shell Variables. (line 159) * LC_IDENTIFICATION: Special Shell Variables. (line 178) * LC_MEASUREMENT: Special Shell Variables. (line 178) * LC_MESSAGES: Special Shell Variables. (line 159) * LC_MONETARY: Special Shell Variables. (line 159) * LC_NAME: Special Shell Variables. (line 178) * LC_NUMERIC: Special Shell Variables. (line 159) * LC_PAPER: Special Shell Variables. (line 178) * LC_TELEPHONE: Special Shell Variables. (line 178) * LC_TIME: Special Shell Variables. (line 159) * LDFLAGS: Preset Output Variables. (line 143) * LIBS: Preset Output Variables. (line 157) * LINENO: Initialization Macros. (line 67) * LINENO <1>: Special Shell Variables. (line 184) * M4: autom4te Invocation. (line 10) * MAIL: Special Shell Variables. (line 84) * MAILPATH: Special Shell Variables. (line 84) * NULLCMD: Special Shell Variables. (line 313) * OBJC: Objective C Compiler. (line 7) * OBJCFLAGS: Preset Output Variables. (line 165) * OBJCFLAGS <1>: Objective C Compiler. (line 7) * OBJCPP: Objective C Compiler. (line 27) * OBJCXX: Objective C++ Compiler. 
(line 7) * OBJCXXCPP: Objective C++ Compiler. (line 27) * OBJCXXFLAGS: Preset Output Variables. (line 169) * OBJCXXFLAGS <1>: Objective C++ Compiler. (line 7) * options: Special Shell Variables. (line 320) * PATH_SEPARATOR: Special Shell Variables. (line 324) * POSIXLY_CORRECT: Special Shell Variables. (line 333) * PS1: Special Shell Variables. (line 84) * PS2: Special Shell Variables. (line 84) * PS4: Special Shell Variables. (line 84) * PWD: Special Shell Variables. (line 348) * RANDOM: Special Shell Variables. (line 357) * SHELL: Initialization Macros. (line 14) * SIMPLE_BACKUP_SUFFIX: autoupdate Invocation. (line 16) * status: Special Shell Variables. (line 365) * TMPDIR: Initialization Macros. (line 77) * WARNINGS: autoconf Invocation. (line 62) * WARNINGS <1>: autoreconf Invocation. (line 107) * WARNINGS <2>: autoheader Invocation. (line 82) * WARNINGS <3>: autom4te Invocation. (line 58) * XMKMF: System Services. (line 10) * YACC: Particular Programs. (line 241) * YFLAGS: Particular Programs. (line 241)  File: autoconf.info, Node: Output Variable Index, Next: Preprocessor Symbol Index, Prev: Environment Variable Index, Up: Indices B.2 Output Variable Index ========================= This is an alphabetical list of the variables that Autoconf can substitute into files that it creates, typically one or more makefiles. *Note Setting Output Variables::, for more information on how this is done. [index] * Menu: * abs_builddir: Preset Output Variables. (line 180) * abs_srcdir: Preset Output Variables. (line 202) * abs_top_builddir: Preset Output Variables. (line 195) * abs_top_srcdir: Preset Output Variables. (line 209) * ac_empty: Fortran Compiler. (line 468) * ALLOCA: Particular Functions. (line 10) * AWK: Particular Programs. (line 10) * bindir: Installation Directory Variables. (line 15) * build: Canonicalizing. (line 21) * builddir: Preset Output Variables. (line 177) * build_alias: Canonicalizing. (line 9) * build_cpu: Canonicalizing. 
(line 21) * build_os: Canonicalizing. (line 21) * build_vendor: Canonicalizing. (line 21) * CC: C Compiler. (line 61) * CC <1>: C Compiler. (line 358) * CC <2>: System Services. (line 48) * CFLAGS: Preset Output Variables. (line 23) * CFLAGS <1>: C Compiler. (line 61) * configure_input: Preset Output Variables. (line 61) * CPP: C Compiler. (line 138) * CPP <1>: C Compiler. (line 153) * CPPFLAGS: Preset Output Variables. (line 75) * cross_compiling: Runtime. (line 71) * CXX: C++ Compiler. (line 7) * CXXCPP: C++ Compiler. (line 66) * CXXFLAGS: Preset Output Variables. (line 97) * CXXFLAGS <1>: C++ Compiler. (line 7) * datadir: Installation Directory Variables. (line 18) * datarootdir: Installation Directory Variables. (line 22) * DEFS: Preset Output Variables. (line 101) * docdir: Installation Directory Variables. (line 26) * dvidir: Installation Directory Variables. (line 30) * ECHO_C: Preset Output Variables. (line 111) * ECHO_N: Preset Output Variables. (line 111) * ECHO_T: Preset Output Variables. (line 111) * EGREP: Particular Programs. (line 29) * ERL: Erlang Compiler and Interpreter. (line 29) * ERL <1>: Language Choice. (line 40) * ERL <2>: Running the Compiler. (line 30) * ERLANG_ERTS_VER: Erlang Libraries. (line 12) * ERLANG_INSTALL_LIB_DIR: Installation Directory Variables. (line 210) * ERLANG_INSTALL_LIB_DIR <1>: Erlang Libraries. (line 86) * ERLANG_INSTALL_LIB_DIR_LIBRARY: Installation Directory Variables. (line 215) * ERLANG_INSTALL_LIB_DIR_LIBRARY <1>: Erlang Libraries. (line 94) * ERLANG_LIB_DIR: Erlang Libraries. (line 28) * ERLANG_LIB_DIR_LIBRARY: Erlang Libraries. (line 36) * ERLANG_LIB_VER_LIBRARY: Erlang Libraries. (line 36) * ERLANG_ROOT_DIR: Erlang Libraries. (line 22) * ERLC: Erlang Compiler and Interpreter. (line 10) * ERLC <1>: Language Choice. (line 40) * ERLCFLAGS: Preset Output Variables. (line 123) * ERLCFLAGS <1>: Erlang Compiler and Interpreter. (line 10) * ERLCFLAGS <2>: Language Choice. 
(line 40) * exec_prefix: Installation Directory Variables. (line 33) * EXEEXT: Compilers and Preprocessors. (line 6) * EXEEXT <1>: Obsolete Macros. (line 181) * F77: Fortran Compiler. (line 19) * FC: Fortran Compiler. (line 44) * FCFLAGS: Preset Output Variables. (line 129) * FCFLAGS <1>: Fortran Compiler. (line 44) * FCLIBS: Fortran Compiler. (line 91) * FC_MODEXT: Fortran Compiler. (line 440) * FC_MODINC: Fortran Compiler. (line 468) * FC_MODOUT: Fortran Compiler. (line 505) * FFLAGS: Preset Output Variables. (line 136) * FFLAGS <1>: Fortran Compiler. (line 19) * FGREP: Particular Programs. (line 36) * FLIBS: Fortran Compiler. (line 91) * GETGROUPS_LIBS: Particular Functions. (line 145) * GETLOADAVG_LIBS: Particular Functions. (line 157) * GOFLAGS: Preset Output Variables. (line 173) * GREP: Particular Programs. (line 20) * host: Canonicalizing. (line 29) * host_alias: Canonicalizing. (line 9) * host_cpu: Canonicalizing. (line 29) * host_os: Canonicalizing. (line 29) * host_vendor: Canonicalizing. (line 29) * htmldir: Installation Directory Variables. (line 40) * includedir: Installation Directory Variables. (line 43) * infodir: Installation Directory Variables. (line 46) * INSTALL: Particular Programs. (line 43) * INSTALL_DATA: Particular Programs. (line 43) * INSTALL_PROGRAM: Particular Programs. (line 43) * INSTALL_SCRIPT: Particular Programs. (line 43) * KMEM_GROUP: Particular Functions. (line 157) * LDFLAGS: Preset Output Variables. (line 143) * LEX: Particular Programs. (line 114) * LEXLIB: Particular Programs. (line 114) * LEX_OUTPUT_ROOT: Particular Programs. (line 114) * libdir: Installation Directory Variables. (line 49) * libexecdir: Installation Directory Variables. (line 52) * LIBOBJDIR: AC_LIBOBJ vs LIBOBJS. (line 35) * LIBOBJS: Particular Functions. (line 157) * LIBOBJS <1>: Particular Functions. (line 292) * LIBOBJS <2>: Particular Functions. (line 305) * LIBOBJS <3>: Generic Functions. (line 56) * LIBOBJS <4>: Generic Functions. 
(line 117) * LIBOBJS <5>: Particular Structures. (line 26) * LIBS: Preset Output Variables. (line 157) * LIBS <1>: Obsolete Macros. (line 312) * LIBS <2>: Obsolete Macros. (line 514) * LIBS <3>: Obsolete Macros. (line 763) * LN_S: Particular Programs. (line 209) * localedir: Installation Directory Variables. (line 55) * localstatedir: Installation Directory Variables. (line 60) * mandir: Installation Directory Variables. (line 72) * MKDIR_P: Particular Programs. (line 80) * NEED_SETGID: Particular Functions. (line 157) * OBJC: Objective C Compiler. (line 7) * OBJCFLAGS: Preset Output Variables. (line 165) * OBJCFLAGS <1>: Objective C Compiler. (line 7) * OBJCPP: Objective C Compiler. (line 27) * OBJCXX: Objective C++ Compiler. (line 7) * OBJCXXCPP: Objective C++ Compiler. (line 27) * OBJCXXFLAGS: Preset Output Variables. (line 169) * OBJCXXFLAGS <1>: Objective C++ Compiler. (line 7) * OBJEXT: Compilers and Preprocessors. (line 10) * OBJEXT <1>: Obsolete Macros. (line 400) * oldincludedir: Installation Directory Variables. (line 75) * OPENMP_CFLAGS: Generic Compiler Characteristics. (line 64) * OPENMP_CXXFLAGS: Generic Compiler Characteristics. (line 64) * OPENMP_FCFLAGS: Generic Compiler Characteristics. (line 64) * OPENMP_FFLAGS: Generic Compiler Characteristics. (line 64) * PACKAGE_BUGREPORT: Initializing configure. (line 117) * PACKAGE_NAME: Initializing configure. (line 105) * PACKAGE_STRING: Initializing configure. (line 114) * PACKAGE_TARNAME: Initializing configure. (line 108) * PACKAGE_URL: Initializing configure. (line 121) * PACKAGE_VERSION: Initializing configure. (line 111) * pdfdir: Installation Directory Variables. (line 78) * POW_LIB: Particular Functions. (line 431) * prefix: Installation Directory Variables. (line 81) * program_transform_name: Transforming Names. (line 11) * psdir: Installation Directory Variables. (line 86) * RANLIB: Particular Programs. (line 228) * runstatedir: Installation Directory Variables. 
(line 64) * sbindir: Installation Directory Variables. (line 89) * SED: Particular Programs. (line 232) * SET_MAKE: Output. (line 45) * sharedstatedir: Installation Directory Variables. (line 93) * srcdir: Preset Output Variables. (line 198) * subdirs: Subdirectories. (line 12) * sysconfdir: Installation Directory Variables. (line 97) * target: Canonicalizing. (line 36) * target_alias: Canonicalizing. (line 9) * target_cpu: Canonicalizing. (line 36) * target_os: Canonicalizing. (line 36) * target_vendor: Canonicalizing. (line 36) * tmp: Initialization Macros. (line 77) * top_builddir: Preset Output Variables. (line 183) * top_build_prefix: Preset Output Variables. (line 187) * top_srcdir: Preset Output Variables. (line 205) * X_CFLAGS: System Services. (line 30) * X_EXTRA_LIBS: System Services. (line 30) * X_LIBS: System Services. (line 30) * X_PRE_LIBS: System Services. (line 30) * YACC: Particular Programs. (line 241)  File: autoconf.info, Node: Preprocessor Symbol Index, Next: Cache Variable Index, Prev: Output Variable Index, Up: Indices B.3 Preprocessor Symbol Index ============================= This is an alphabetical list of the C preprocessor symbols that the Autoconf macros define. To work with Autoconf, C source code needs to use these names in ‘#if’ or ‘#ifdef’ directives. [index] * Menu: * _ALL_SOURCE: C and Posix Variants. (line 21) * _ALL_SOURCE <1>: Obsolete Macros. (line 20) * _DARWIN_C_SOURCE: C and Posix Variants. (line 23) * _FILE_OFFSET_BITS: System Services. (line 48) * _Generic: C Compiler. (line 235) * _GNU_SOURCE: C and Posix Variants. (line 25) * _GNU_SOURCE <1>: Obsolete Macros. (line 240) * _LARGEFILE_SOURCE: Particular Functions. (line 133) * _LARGE_FILES: System Services. (line 48) * _MINIX: C and Posix Variants. (line 63) * _MINIX <1>: Obsolete Macros. (line 387) * _NETBSD_SOURCE: C and Posix Variants. (line 27) * _OPENBSD_SOURCE: C and Posix Variants. (line 30) * _OPENMP: Generic Compiler Characteristics. 
(line 64) * _POSIX_1_SOURCE: C and Posix Variants. (line 63) * _POSIX_1_SOURCE <1>: Obsolete Macros. (line 387) * _POSIX_PTHREAD_SEMANTICS: C and Posix Variants. (line 33) * _POSIX_SOURCE: C and Posix Variants. (line 63) * _POSIX_SOURCE <1>: Obsolete Macros. (line 387) * _POSIX_VERSION: Particular Headers. (line 196) * _TANDEM_SOURCE: C and Posix Variants. (line 49) * _XOPEN_SOURCE: C and Posix Variants. (line 70) * __CHAR_UNSIGNED__: C Compiler. (line 283) * __EXTENSIONS__: C and Posix Variants. (line 56) * __PROTOTYPES: C Compiler. (line 348) * __STDC_NO_VLA__: C Compiler. (line 331) * __STDC_WANT_DEC_FP__: C and Posix Variants. (line 74) * __STDC_WANT_IEC_60559_ATTRIBS_EXT__: C and Posix Variants. (line 35) * __STDC_WANT_IEC_60559_BFP_EXT__: C and Posix Variants. (line 37) * __STDC_WANT_IEC_60559_DFP_EXT__: C and Posix Variants. (line 39) * __STDC_WANT_IEC_60559_FUNCS_EXT__: C and Posix Variants. (line 41) * __STDC_WANT_IEC_60559_TYPES_EXT__: C and Posix Variants. (line 43) * __STDC_WANT_LIB_EXT1__: C and Posix Variants. (line 77) * __STDC_WANT_LIB_EXT2__: C and Posix Variants. (line 45) * __STDC_WANT_MATH_SPEC_FUNCS__: C and Posix Variants. (line 47) * ALIGNOF_TYPE: Generic Compiler Characteristics. (line 30) * CLOSEDIR_VOID: Particular Functions. (line 58) * const: C Compiler. (line 205) * CXX_NO_MINUS_C_MINUS_O: C++ Compiler. (line 83) * C_ALLOCA: Particular Functions. (line 10) * C_GETLOADAVG: Particular Functions. (line 157) * DGUX: Particular Functions. (line 157) * DIRENT: Obsolete Macros. (line 161) * F77_DUMMY_MAIN: Fortran Compiler. (line 129) * F77_FUNC: Fortran Compiler. (line 199) * F77_FUNC_: Fortran Compiler. (line 199) * F77_MAIN: Fortran Compiler. (line 174) * F77_NO_MINUS_C_MINUS_O: Fortran Compiler. (line 75) * FC_DUMMY_MAIN: Fortran Compiler. (line 129) * FC_FUNC: Fortran Compiler. (line 199) * FC_FUNC_: Fortran Compiler. (line 199) * FC_MAIN: Fortran Compiler. (line 174) * FC_NO_MINUS_C_MINUS_O: Fortran Compiler. 
(line 75) * FLEXIBLE_ARRAY_MEMBER: C Compiler. (line 307) * GETGROUPS_T: Particular Types. (line 14) * GETLOADAVG_PRIVILEGED: Particular Functions. (line 157) * GETPGRP_VOID: Particular Functions. (line 204) * gid_t: Particular Types. (line 126) * GWINSZ_IN_SYS_IOCTL: Particular Headers. (line 212) * HAVE_AGGREGATE_MEMBER: Generic Structures. (line 29) * HAVE_ALLOCA_H: Particular Functions. (line 10) * HAVE_CHOWN: Particular Functions. (line 48) * HAVE_CONFIG_H: Configuration Headers. (line 37) * HAVE_C_BACKSLASH_A: C Compiler. (line 164) * HAVE_C_VARARRAYS: C Compiler. (line 331) * HAVE_DECL_STRERROR_R: Particular Functions. (line 408) * HAVE_DECL_SYMBOL: Generic Declarations. (line 34) * HAVE_DECL_SYMBOL <1>: Generic Declarations. (line 79) * HAVE_DECL_TZNAME: Particular Structures. (line 43) * HAVE_DIRENT_H: Particular Headers. (line 25) * HAVE_DOPRNT: Particular Functions. (line 473) * HAVE_FSEEKO: Particular Functions. (line 133) * HAVE_FUNCTION: Generic Functions. (line 27) * HAVE_FUNCTION <1>: Generic Functions. (line 38) * HAVE_FUNCTION <2>: Generic Functions. (line 117) * HAVE_GETGROUPS: Particular Functions. (line 145) * HAVE_GETMNTENT: Particular Functions. (line 191) * HAVE_HEADER: Generic Headers. (line 39) * HAVE_HEADER <1>: Generic Headers. (line 56) * HAVE_INT16_T: Particular Types. (line 40) * HAVE_INT32_T: Particular Types. (line 43) * HAVE_INT64_T: Particular Types. (line 46) * HAVE_INT8_T: Particular Types. (line 21) * HAVE_INTMAX_T: Particular Types. (line 49) * HAVE_INTPTR_T: Particular Types. (line 54) * HAVE_LONG_DOUBLE: Particular Types. (line 59) * HAVE_LONG_DOUBLE <1>: Obsolete Macros. (line 33) * HAVE_LONG_DOUBLE_WIDER: Particular Types. (line 70) * HAVE_LONG_FILE_NAMES: System Services. (line 72) * HAVE_LONG_LONG_INT: Particular Types. (line 78) * HAVE_LSTAT_EMPTY_STRING_BUG: Particular Functions. (line 382) * HAVE_MALLOC: Particular Functions. (line 246) * HAVE_MBRTOWC: Particular Functions. 
(line 281) * HAVE_MMAP: Particular Functions. (line 317) * HAVE_NDIR_H: Particular Headers. (line 25) * HAVE_NLIST_H: Particular Functions. (line 157) * HAVE_OBSTACK: Particular Functions. (line 329) * HAVE_REALLOC: Particular Functions. (line 339) * HAVE_RESOLV_H: Particular Headers. (line 99) * HAVE_RESTARTABLE_SYSCALLS: Obsolete Macros. (line 575) * HAVE_STAT_EMPTY_STRING_BUG: Particular Functions. (line 382) * HAVE_STDBOOL_H: Particular Headers. (line 127) * HAVE_STRCOLL: Particular Functions. (line 398) * HAVE_STRERROR_R: Particular Functions. (line 408) * HAVE_STRFTIME: Particular Functions. (line 424) * HAVE_STRINGIZE: C Compiler. (line 297) * HAVE_STRNLEN: Particular Functions. (line 453) * HAVE_STRTOLD: Particular Functions. (line 443) * HAVE_STRUCT_DIRENT_D_INO: Particular Structures. (line 9) * HAVE_STRUCT_DIRENT_D_TYPE: Particular Structures. (line 21) * HAVE_STRUCT_STAT_ST_BLKSIZE: Obsolete Macros. (line 548) * HAVE_STRUCT_STAT_ST_BLOCKS: Particular Structures. (line 26) * HAVE_STRUCT_STAT_ST_RDEV: Obsolete Macros. (line 557) * HAVE_STRUCT_TM_TM_ZONE: Particular Structures. (line 43) * HAVE_ST_BLKSIZE: Obsolete Macros. (line 548) * HAVE_ST_BLOCKS: Particular Structures. (line 26) * HAVE_ST_RDEV: Obsolete Macros. (line 557) * HAVE_SYS_DIR_H: Particular Headers. (line 25) * HAVE_SYS_NDIR_H: Particular Headers. (line 25) * HAVE_SYS_WAIT_H: Particular Headers. (line 172) * HAVE_TM_ZONE: Particular Structures. (line 43) * HAVE_TYPE: Generic Types. (line 27) * HAVE_TYPEOF: C Compiler. (line 341) * HAVE_TZNAME: Particular Structures. (line 43) * HAVE_UINT16_T: Particular Types. (line 138) * HAVE_UINT32_T: Particular Types. (line 141) * HAVE_UINT64_T: Particular Types. (line 144) * HAVE_UINT8_T: Particular Types. (line 132) * HAVE_UINTMAX_T: Particular Types. (line 147) * HAVE_UINTPTR_T: Particular Types. (line 152) * HAVE_UNSIGNED_LONG_LONG_INT: Particular Types. (line 157) * HAVE_UTIME_NULL: Particular Functions. 
(line 463) * HAVE_VFORK_H: Particular Functions. (line 109) * HAVE_VPRINTF: Particular Functions. (line 473) * HAVE_WAIT3: Obsolete Macros. (line 222) * HAVE_WORKING_FORK: Particular Functions. (line 109) * HAVE_WORKING_VFORK: Particular Functions. (line 109) * HAVE__BOOL: Particular Headers. (line 10) * HAVE__BOOL <1>: Particular Headers. (line 127) * inline: C Compiler. (line 278) * int16_t: Particular Types. (line 40) * int32_t: Particular Types. (line 43) * int64_t: Particular Types. (line 46) * int8_t: Particular Types. (line 21) * intmax_t: Particular Types. (line 49) * intptr_t: Particular Types. (line 54) * INT_16_BITS: Obsolete Macros. (line 292) * LONG_64_BITS: Obsolete Macros. (line 354) * LSTAT_FOLLOWS_SLASHED_SYMLINK: Particular Functions. (line 227) * MAJOR_IN_MKDEV: Particular Headers. (line 68) * MAJOR_IN_SYSMACROS: Particular Headers. (line 68) * malloc: Particular Functions. (line 246) * mbstate_t: Particular Types. (line 88) * mode_t: Particular Types. (line 96) * NDEBUG: Particular Headers. (line 20) * NDIR: Obsolete Macros. (line 161) * NEED_MEMORY_H: Obsolete Macros. (line 375) * NEED_SETGID: Particular Functions. (line 157) * NLIST_NAME_UNION: Particular Functions. (line 157) * NO_MINUS_C_MINUS_O: C Compiler. (line 127) * off_t: Particular Types. (line 102) * PACKAGE_BUGREPORT: Initializing configure. (line 117) * PACKAGE_NAME: Initializing configure. (line 105) * PACKAGE_STRING: Initializing configure. (line 114) * PACKAGE_TARNAME: Initializing configure. (line 108) * PACKAGE_URL: Initializing configure. (line 121) * PACKAGE_VERSION: Initializing configure. (line 111) * PARAMS: C Compiler. (line 348) * pid_t: Particular Types. (line 108) * PROTOTYPES: C Compiler. (line 348) * realloc: Particular Functions. (line 339) * restrict: C Compiler. (line 239) * RETSIGTYPE: Obsolete Macros. (line 691) * SELECT_TYPE_ARG1: Particular Functions. (line 353) * SELECT_TYPE_ARG234: Particular Functions. (line 353) * SELECT_TYPE_ARG5: Particular Functions. 
(line 353) * SETPGRP_VOID: Particular Functions. (line 364) * SETVBUF_REVERSED: Obsolete Macros. (line 214) * SIZEOF_TYPE-OR-EXPR: Generic Compiler Characteristics. (line 8) * size_t: Particular Types. (line 114) * ssize_t: Particular Types. (line 120) * STAT_MACROS_BROKEN: Particular Headers. (line 118) * STDC_HEADERS: Particular Headers. (line 161) * STRERROR_R_CHAR_P: Particular Functions. (line 408) * SVR4: Particular Functions. (line 157) * SYSDIR: Obsolete Macros. (line 161) * SYSNDIR: Obsolete Macros. (line 161) * SYS_SIGLIST_DECLARED: Obsolete Macros. (line 141) * TIME_WITH_SYS_TIME: Obsolete Macros. (line 267) * TM_IN_SYS_TIME: Particular Structures. (line 35) * typeof: C Compiler. (line 341) * uid_t: Particular Types. (line 126) * uint16_t: Particular Types. (line 138) * uint32_t: Particular Types. (line 141) * uint64_t: Particular Types. (line 144) * uint8_t: Particular Types. (line 132) * uintmax_t: Particular Types. (line 147) * uintptr_t: Particular Types. (line 152) * UMAX: Particular Functions. (line 157) * UMAX4_3: Particular Functions. (line 157) * USG: Obsolete Macros. (line 717) * VARIABLE: Defining Symbols. (line 32) * VARIABLE <1>: Defining Symbols. (line 74) * vfork: Particular Functions. (line 109) * volatile: C Compiler. (line 257) * WORDS_BIGENDIAN: C Compiler. (line 172) * X_DISPLAY_MISSING: System Services. (line 30) * YYTEXT_POINTER: Particular Programs. (line 114)  File: autoconf.info, Node: Cache Variable Index, Next: Autoconf Macro Index, Prev: Preprocessor Symbol Index, Up: Indices B.4 Cache Variable Index ======================== This is an alphabetical list of documented cache variables used by macros defined in Autoconf. Autoconf macros may use additional cache variables internally. [index] * Menu: * ac_cv_alignof_TYPE-OR-EXPR: Generic Compiler Characteristics. (line 30) * ac_cv_c_const: C Compiler. (line 205) * ac_cv_c_int16_t: Particular Types. (line 40) * ac_cv_c_int32_t: Particular Types. 
(line 43) * ac_cv_c_int64_t: Particular Types. (line 46) * ac_cv_c_int8_t: Particular Types. (line 21) * ac_cv_c_restrict: C Compiler. (line 239) * ac_cv_c_uint16_t: Particular Types. (line 138) * ac_cv_c_uint32_t: Particular Types. (line 141) * ac_cv_c_uint64_t: Particular Types. (line 144) * ac_cv_c_uint8_t: Particular Types. (line 132) * ac_cv_f77_compiler_gnu: Fortran Compiler. (line 19) * ac_cv_f77_dummy_main: Fortran Compiler. (line 129) * ac_cv_f77_implicit_none: Fortran Compiler. (line 428) * ac_cv_f77_libs: Fortran Compiler. (line 91) * ac_cv_f77_main: Fortran Compiler. (line 174) * ac_cv_f77_mangling: Fortran Compiler. (line 199) * ac_cv_fc_check_bounds: Fortran Compiler. (line 413) * ac_cv_fc_compiler_gnu: Fortran Compiler. (line 44) * ac_cv_fc_dummy_main: Fortran Compiler. (line 129) * ac_cv_fc_fixedform: Fortran Compiler. (line 373) * ac_cv_fc_freeform: Fortran Compiler. (line 348) * ac_cv_fc_implicit_none: Fortran Compiler. (line 428) * ac_cv_fc_libs: Fortran Compiler. (line 91) * ac_cv_fc_line_length: Fortran Compiler. (line 395) * ac_cv_fc_main: Fortran Compiler. (line 174) * ac_cv_fc_mangling: Fortran Compiler. (line 199) * ac_cv_fc_module_ext: Fortran Compiler. (line 440) * ac_cv_fc_module_flag: Fortran Compiler. (line 468) * ac_cv_fc_module_output_flag: Fortran Compiler. (line 505) * ac_cv_fc_pp_define: Fortran Compiler. (line 332) * ac_cv_fc_pp_srcext_EXT: Fortran Compiler. (line 275) * ac_cv_fc_srcext_EXT: Fortran Compiler. (line 275) * ac_cv_file_FILE: Files. (line 13) * ac_cv_file_FILE <1>: Files. (line 21) * ac_cv_func_chown_works: Particular Functions. (line 48) * ac_cv_func_closedir_void: Particular Functions. (line 58) * ac_cv_func_fnmatch_gnu: Particular Functions. (line 98) * ac_cv_func_fnmatch_works: Particular Functions. (line 83) * ac_cv_func_fnmatch_works <1>: Particular Functions. (line 482) * ac_cv_func_FUNCTION: Generic Functions. (line 15) * ac_cv_func_getgroups_works: Particular Functions. 
(line 145) * ac_cv_func_getpgrp_void: Particular Functions. (line 204) * ac_cv_func_lstat_dereferences_slashed_symlink: Particular Functions. (line 227) * ac_cv_func_lstat_empty_string_bug: Particular Functions. (line 382) * ac_cv_func_malloc_0_nonnull: Particular Functions. (line 246) * ac_cv_func_mbrtowc: Particular Functions. (line 281) * ac_cv_func_memcmp_working: Particular Functions. (line 292) * ac_cv_func_mmap_fixed_mapped: Particular Functions. (line 317) * ac_cv_func_obstack: Particular Functions. (line 329) * ac_cv_func_pow: Particular Functions. (line 431) * ac_cv_func_realloc_0_nonnull: Particular Functions. (line 339) * ac_cv_func_setpgrp_void: Particular Functions. (line 364) * ac_cv_func_stat_empty_string_bug: Particular Functions. (line 382) * ac_cv_func_strcoll_works: Particular Functions. (line 398) * ac_cv_func_strerror_r_char_p: Particular Functions. (line 408) * ac_cv_func_strnlen_working: Particular Functions. (line 453) * ac_cv_func_strtod: Particular Functions. (line 431) * ac_cv_func_strtold: Particular Functions. (line 443) * ac_cv_func_utime_null: Particular Functions. (line 463) * ac_cv_func_working_mktime: Particular Functions. (line 305) * ac_cv_have_decl_SYMBOL: Generic Declarations. (line 11) * ac_cv_have_decl_SYMBOL <1>: Generic Declarations. (line 34) * ac_cv_header_HEADER-FILE: Generic Headers. (line 13) * ac_cv_header_HEADER-FILE <1>: Generic Headers. (line 39) * ac_cv_header_stdbool_h: Particular Headers. (line 10) * ac_cv_header_stdbool_h <1>: Particular Headers. (line 127) * ac_cv_header_stdc: Particular Headers. (line 161) * ac_cv_header_sys_wait_h: Particular Headers. (line 172) * ac_cv_header_time: Obsolete Macros. (line 267) * ac_cv_lib_error_at_line: Particular Functions. (line 73) * ac_cv_lib_LIBRARY_FUNCTION: Libraries. (line 11) * ac_cv_member_AGGREGATE_MEMBER: Generic Structures. (line 11) * ac_cv_member_struct_stat_st_blocks: Particular Structures. (line 26) * ac_cv_path_install: Particular Programs. 
(line 43) * ac_cv_path_mkdir: Particular Programs. (line 80) * ac_cv_path_SED: Particular Programs. (line 232) * ac_cv_path_VARIABLE: Generic Programs. (line 108) * ac_cv_path_VARIABLE <1>: Generic Programs. (line 115) * ac_cv_path_VARIABLE <2>: Generic Programs. (line 123) * ac_cv_prog_AWK: Particular Programs. (line 10) * ac_cv_prog_cc_COMPILER_c_o: C Compiler. (line 127) * ac_cv_prog_cxx_openmp: Generic Compiler Characteristics. (line 64) * ac_cv_prog_c_openmp: Generic Compiler Characteristics. (line 64) * ac_cv_prog_EGREP: Particular Programs. (line 29) * ac_cv_prog_f77_c_o: Fortran Compiler. (line 75) * ac_cv_prog_f77_g: Fortran Compiler. (line 19) * ac_cv_prog_f77_openmp: Generic Compiler Characteristics. (line 64) * ac_cv_prog_f77_v: Fortran Compiler. (line 91) * ac_cv_prog_fc_c_o: Fortran Compiler. (line 75) * ac_cv_prog_fc_g: Fortran Compiler. (line 44) * ac_cv_prog_fc_openmp: Generic Compiler Characteristics. (line 64) * ac_cv_prog_fc_v: Fortran Compiler. (line 91) * ac_cv_prog_FGREP: Particular Programs. (line 36) * ac_cv_prog_GREP: Particular Programs. (line 20) * ac_cv_prog_LEX: Particular Programs. (line 114) * ac_cv_prog_VARIABLE: Generic Programs. (line 24) * ac_cv_prog_VARIABLE <1>: Generic Programs. (line 36) * ac_cv_prog_YACC: Particular Programs. (line 241) * ac_cv_search_FUNCTION: Libraries. (line 52) * ac_cv_search_getmntent: Particular Functions. (line 191) * ac_cv_sizeof_TYPE-OR-EXPR: Generic Compiler Characteristics. (line 8) * ac_cv_sys_posix_termios: System Services. (line 76) * ac_cv_type_getgroups: Particular Types. (line 14) * ac_cv_type_long_double: Particular Types. (line 59) * ac_cv_type_long_double_wider: Particular Types. (line 70) * ac_cv_type_long_long_int: Particular Types. (line 78) * ac_cv_type_mbstate_t: Particular Types. (line 88) * ac_cv_type_mode_t: Particular Types. (line 96) * ac_cv_type_off_t: Particular Types. (line 102) * ac_cv_type_pid_t: Particular Types. (line 108) * ac_cv_type_size_t: Particular Types. 
(line 114) * ac_cv_type_ssize_t: Particular Types. (line 120) * ac_cv_type_TYPE: Generic Types. (line 11) * ac_cv_type_uid_t: Particular Types. (line 126) * ac_cv_type_unsigned_long_long_int: Particular Types. (line 157)  File: autoconf.info, Node: Autoconf Macro Index, Next: M4 Macro Index, Prev: Cache Variable Index, Up: Indices B.5 Autoconf Macro Index ======================== This is an alphabetical list of the Autoconf macros. [index] * Menu: * AC_ACT_IFELSE: AC_ACT_IFELSE vs AC_TRY_ACT. (line 6) * AC_AIX: Obsolete Macros. (line 20) * AC_ALLOCA: Obsolete Macros. (line 24) * AC_ARG_ARRAY: Obsolete Macros. (line 27) * AC_ARG_ENABLE: Package Options. (line 35) * AC_ARG_PROGRAM: Transforming Names. (line 11) * AC_ARG_VAR: Setting Output Variables. (line 80) * AC_ARG_WITH: External Software. (line 36) * AC_AUTOCONF_VERSION: Versioning. (line 21) * AC_BEFORE: Suggested Ordering. (line 28) * AC_CACHE_CHECK: Caching Results. (line 29) * AC_CACHE_LOAD: Cache Checkpointing. (line 13) * AC_CACHE_SAVE: Cache Checkpointing. (line 17) * AC_CACHE_VAL: Caching Results. (line 15) * AC_CANONICAL_BUILD: Canonicalizing. (line 21) * AC_CANONICAL_HOST: Canonicalizing. (line 29) * AC_CANONICAL_SYSTEM: Obsolete Macros. (line 41) * AC_CANONICAL_TARGET: Canonicalizing. (line 36) * AC_CHAR_UNSIGNED: Obsolete Macros. (line 51) * AC_CHECKING: Obsolete Macros. (line 101) * AC_CHECK_ALIGNOF: Generic Compiler Characteristics. (line 30) * AC_CHECK_DECL: Generic Declarations. (line 11) * AC_CHECK_DECLS: Generic Declarations. (line 34) * AC_CHECK_DECLS_ONCE: Generic Declarations. (line 79) * AC_CHECK_FILE: Files. (line 13) * AC_CHECK_FILES: Files. (line 21) * AC_CHECK_FUNC: Generic Functions. (line 15) * AC_CHECK_FUNCS: Generic Functions. (line 27) * AC_CHECK_FUNCS_ONCE: Generic Functions. (line 38) * AC_CHECK_HEADER: Generic Headers. (line 13) * AC_CHECK_HEADERS: Generic Headers. (line 39) * AC_CHECK_HEADERS_ONCE: Generic Headers. (line 56) * AC_CHECK_HEADER_STDBOOL: Particular Headers. 
(line 10) * AC_CHECK_INCLUDES_DEFAULT: Default Includes. (line 90) * AC_CHECK_LIB: Libraries. (line 11) * AC_CHECK_MEMBER: Generic Structures. (line 11) * AC_CHECK_MEMBERS: Generic Structures. (line 29) * AC_CHECK_PROG: Generic Programs. (line 24) * AC_CHECK_PROGS: Generic Programs. (line 36) * AC_CHECK_SIZEOF: Generic Compiler Characteristics. (line 8) * AC_CHECK_TARGET_TOOL: Generic Programs. (line 48) * AC_CHECK_TARGET_TOOLS: Generic Programs. (line 79) * AC_CHECK_TOOL: Generic Programs. (line 64) * AC_CHECK_TOOLS: Generic Programs. (line 92) * AC_CHECK_TYPE: Generic Types. (line 11) * AC_CHECK_TYPE <1>: Obsolete Macros. (line 54) * AC_CHECK_TYPES: Generic Types. (line 27) * AC_COMPILE_CHECK: Obsolete Macros. (line 109) * AC_COMPILE_IFELSE: Running the Compiler. (line 13) * AC_COMPUTE_INT: Generic Compiler Characteristics. (line 42) * AC_CONFIG_AUX_DIR: Input. (line 76) * AC_CONFIG_COMMANDS: Configuration Commands. (line 13) * AC_CONFIG_COMMANDS_POST: Configuration Commands. (line 42) * AC_CONFIG_COMMANDS_PRE: Configuration Commands. (line 36) * AC_CONFIG_FILES: Configuration Files. (line 9) * AC_CONFIG_HEADERS: Configuration Headers. (line 37) * AC_CONFIG_ITEMS: Configuration Actions. (line 12) * AC_CONFIG_LIBOBJ_DIR: Generic Functions. (line 97) * AC_CONFIG_LINKS: Configuration Links. (line 12) * AC_CONFIG_MACRO_DIR: Input. (line 24) * AC_CONFIG_MACRO_DIRS: Input. (line 24) * AC_CONFIG_MACRO_DIR_TRACE: Input. (line 24) * AC_CONFIG_SRCDIR: Input. (line 9) * AC_CONFIG_SUBDIRS: Subdirectories. (line 12) * AC_CONFIG_TESTDIR: Making testsuite Scripts. (line 27) * AC_CONST: Obsolete Macros. (line 117) * AC_COPYRIGHT: Notices. (line 10) * AC_CROSS_CHECK: Obsolete Macros. (line 120) * AC_CYGWIN: Obsolete Macros. (line 124) * AC_C_BACKSLASH_A: C Compiler. (line 164) * AC_C_BIGENDIAN: C Compiler. (line 172) * AC_C_CHAR_UNSIGNED: C Compiler. (line 283) * AC_C_CONST: C Compiler. (line 205) * AC_C_CROSS: Obsolete Macros. (line 30) * AC_C_FLEXIBLE_ARRAY_MEMBER: C Compiler. 
(line 307) * AC_C_INLINE: C Compiler. (line 278) * AC_C_LONG_DOUBLE: Obsolete Macros. (line 33) * AC_C_PROTOTYPES: C Compiler. (line 348) * AC_C_RESTRICT: C Compiler. (line 239) * AC_C_STRINGIZE: C Compiler. (line 297) * AC_C_TYPEOF: C Compiler. (line 341) * AC_C_VARARRAYS: C Compiler. (line 331) * AC_C_VOLATILE: C Compiler. (line 257) * AC_C__GENERIC: C Compiler. (line 235) * AC_DATAROOTDIR_CHECKED: Changed Directory Variables. (line 58) * AC_DECL_SYS_SIGLIST: Obsolete Macros. (line 141) * AC_DECL_YYTEXT: Obsolete Macros. (line 154) * AC_DEFINE: Defining Symbols. (line 32) * AC_DEFINE_UNQUOTED: Defining Symbols. (line 74) * AC_DEFUN: Macro Definitions. (line 7) * AC_DEFUN_ONCE: One-Shot Macros. (line 14) * AC_DIAGNOSE: Obsolete Macros. (line 158) * AC_DIR_HEADER: Obsolete Macros. (line 161) * AC_DISABLE_OPTION_CHECKING: Option Checking. (line 28) * AC_DYNIX_SEQ: Obsolete Macros. (line 173) * AC_EGREP_CPP: Running the Preprocessor. (line 76) * AC_EGREP_HEADER: Running the Preprocessor. (line 67) * AC_EMXOS2: Obsolete Macros. (line 186) * AC_ENABLE: Obsolete Macros. (line 192) * AC_ERLANG_CHECK_LIB: Erlang Libraries. (line 36) * AC_ERLANG_NEED_ERL: Erlang Compiler and Interpreter. (line 41) * AC_ERLANG_NEED_ERLC: Erlang Compiler and Interpreter. (line 24) * AC_ERLANG_PATH_ERL: Erlang Compiler and Interpreter. (line 29) * AC_ERLANG_PATH_ERLC: Erlang Compiler and Interpreter. (line 10) * AC_ERLANG_SUBST_ERTS_VER: Erlang Libraries. (line 12) * AC_ERLANG_SUBST_INSTALL_LIB_DIR: Installation Directory Variables. (line 210) * AC_ERLANG_SUBST_INSTALL_LIB_DIR <1>: Erlang Libraries. (line 86) * AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR: Installation Directory Variables. (line 215) * AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR <1>: Erlang Libraries. (line 94) * AC_ERLANG_SUBST_LIB_DIR: Erlang Libraries. (line 28) * AC_ERLANG_SUBST_ROOT_DIR: Erlang Libraries. (line 22) * AC_ERROR: Obsolete Macros. (line 196) * AC_EXEEXT: Obsolete Macros. (line 181) * AC_F77_DUMMY_MAIN: Fortran Compiler. 
(line 129) * AC_F77_FUNC: Fortran Compiler. (line 262) * AC_F77_IMPLICIT_NONE: Fortran Compiler. (line 428) * AC_F77_LIBRARY_LDFLAGS: Fortran Compiler. (line 91) * AC_F77_MAIN: Fortran Compiler. (line 174) * AC_F77_WRAPPERS: Fortran Compiler. (line 199) * AC_FATAL: Obsolete Macros. (line 199) * AC_FC_CHECK_BOUNDS: Fortran Compiler. (line 413) * AC_FC_DUMMY_MAIN: Fortran Compiler. (line 129) * AC_FC_FIXEDFORM: Fortran Compiler. (line 373) * AC_FC_FREEFORM: Fortran Compiler. (line 348) * AC_FC_FUNC: Fortran Compiler. (line 262) * AC_FC_IMPLICIT_NONE: Fortran Compiler. (line 428) * AC_FC_LIBRARY_LDFLAGS: Fortran Compiler. (line 91) * AC_FC_LINE_LENGTH: Fortran Compiler. (line 395) * AC_FC_MAIN: Fortran Compiler. (line 174) * AC_FC_MODULE_EXTENSION: Fortran Compiler. (line 440) * AC_FC_MODULE_FLAG: Fortran Compiler. (line 468) * AC_FC_MODULE_OUTPUT_FLAG: Fortran Compiler. (line 505) * AC_FC_PP_DEFINE: Fortran Compiler. (line 332) * AC_FC_PP_SRCEXT: Fortran Compiler. (line 275) * AC_FC_SRCEXT: Fortran Compiler. (line 275) * AC_FC_WRAPPERS: Fortran Compiler. (line 199) * AC_FIND_X: Obsolete Macros. (line 202) * AC_FIND_XTRA: Obsolete Macros. (line 205) * AC_FOREACH: Obsolete Macros. (line 208) * AC_FUNC_ALLOCA: Particular Functions. (line 10) * AC_FUNC_CHECK: Obsolete Macros. (line 211) * AC_FUNC_CHOWN: Particular Functions. (line 48) * AC_FUNC_CLOSEDIR_VOID: Particular Functions. (line 58) * AC_FUNC_ERROR_AT_LINE: Particular Functions. (line 73) * AC_FUNC_FNMATCH: Particular Functions. (line 83) * AC_FUNC_FNMATCH_GNU: Particular Functions. (line 98) * AC_FUNC_FORK: Particular Functions. (line 109) * AC_FUNC_FSEEKO: Particular Functions. (line 133) * AC_FUNC_GETGROUPS: Particular Functions. (line 145) * AC_FUNC_GETLOADAVG: Particular Functions. (line 157) * AC_FUNC_GETMNTENT: Particular Functions. (line 191) * AC_FUNC_GETPGRP: Particular Functions. (line 204) * AC_FUNC_LSTAT: Particular Functions. (line 382) * AC_FUNC_LSTAT_FOLLOWS_SLASHED_SYMLINK: Particular Functions. 
(line 227) * AC_FUNC_MALLOC: Particular Functions. (line 246) * AC_FUNC_MBRTOWC: Particular Functions. (line 281) * AC_FUNC_MEMCMP: Particular Functions. (line 292) * AC_FUNC_MKTIME: Particular Functions. (line 305) * AC_FUNC_MMAP: Particular Functions. (line 317) * AC_FUNC_OBSTACK: Particular Functions. (line 329) * AC_FUNC_REALLOC: Particular Functions. (line 339) * AC_FUNC_SELECT_ARGTYPES: Particular Functions. (line 353) * AC_FUNC_SETPGRP: Particular Functions. (line 364) * AC_FUNC_SETVBUF_REVERSED: Obsolete Macros. (line 214) * AC_FUNC_STAT: Particular Functions. (line 382) * AC_FUNC_STRCOLL: Particular Functions. (line 398) * AC_FUNC_STRERROR_R: Particular Functions. (line 408) * AC_FUNC_STRFTIME: Particular Functions. (line 424) * AC_FUNC_STRNLEN: Particular Functions. (line 453) * AC_FUNC_STRTOD: Particular Functions. (line 431) * AC_FUNC_STRTOLD: Particular Functions. (line 443) * AC_FUNC_UTIME_NULL: Particular Functions. (line 463) * AC_FUNC_VPRINTF: Particular Functions. (line 473) * AC_FUNC_WAIT3: Obsolete Macros. (line 222) * AC_GCC_TRADITIONAL: Obsolete Macros. (line 230) * AC_GETGROUPS_T: Obsolete Macros. (line 234) * AC_GETLOADAVG: Obsolete Macros. (line 237) * AC_GNU_SOURCE: Obsolete Macros. (line 240) * AC_HAVE_FUNCS: Obsolete Macros. (line 244) * AC_HAVE_HEADERS: Obsolete Macros. (line 247) * AC_HAVE_LIBRARY: Obsolete Macros. (line 251) * AC_HAVE_POUNDBANG: Obsolete Macros. (line 258) * AC_HEADER_ASSERT: Particular Headers. (line 20) * AC_HEADER_CHECK: Obsolete Macros. (line 261) * AC_HEADER_DIRENT: Particular Headers. (line 25) * AC_HEADER_EGREP: Obsolete Macros. (line 264) * AC_HEADER_MAJOR: Particular Headers. (line 68) * AC_HEADER_RESOLV: Particular Headers. (line 99) * AC_HEADER_STAT: Particular Headers. (line 118) * AC_HEADER_STDBOOL: Particular Headers. (line 127) * AC_HEADER_STDC: Particular Headers. (line 161) * AC_HEADER_SYS_WAIT: Particular Headers. (line 172) * AC_HEADER_TIME: Obsolete Macros. 
(line 267) * AC_HEADER_TIOCGWINSZ: Particular Headers. (line 212) * AC_HELP_STRING: Obsolete Macros. (line 278) * AC_INCLUDES_DEFAULT: Default Includes. (line 37) * AC_INIT: Initializing configure. (line 14) * AC_INIT <1>: Obsolete Macros. (line 281) * AC_INLINE: Obsolete Macros. (line 289) * AC_INT_16_BITS: Obsolete Macros. (line 292) * AC_IRIX_SUN: Obsolete Macros. (line 296) * AC_ISC_POSIX: Obsolete Macros. (line 312) * AC_LANG: Language Choice. (line 14) * AC_LANG_ASSERT: Language Choice. (line 79) * AC_LANG_C: Obsolete Macros. (line 319) * AC_LANG_CALL: Generating Sources. (line 141) * AC_LANG_CONFTEST: Generating Sources. (line 12) * AC_LANG_CPLUSPLUS: Obsolete Macros. (line 322) * AC_LANG_DEFINES_PROVIDED: Generating Sources. (line 30) * AC_LANG_FORTRAN77: Obsolete Macros. (line 325) * AC_LANG_FUNC_LINK_TRY: Generating Sources. (line 153) * AC_LANG_POP: Language Choice. (line 66) * AC_LANG_PROGRAM: Generating Sources. (line 77) * AC_LANG_PUSH: Language Choice. (line 61) * AC_LANG_RESTORE: Obsolete Macros. (line 328) * AC_LANG_SAVE: Obsolete Macros. (line 334) * AC_LANG_SOURCE: Generating Sources. (line 39) * AC_LANG_WERROR: Generic Compiler Characteristics. (line 54) * AC_LIBOBJ: Generic Functions. (line 56) * AC_LIBSOURCE: Generic Functions. (line 65) * AC_LIBSOURCES: Generic Functions. (line 89) * AC_LINK_FILES: Obsolete Macros. (line 339) * AC_LINK_IFELSE: Running the Linker. (line 24) * AC_LN_S: Obsolete Macros. (line 351) * AC_LONG_64_BITS: Obsolete Macros. (line 354) * AC_LONG_DOUBLE: Obsolete Macros. (line 359) * AC_LONG_FILE_NAMES: Obsolete Macros. (line 367) * AC_MAJOR_HEADER: Obsolete Macros. (line 372) * AC_MEMORY_H: Obsolete Macros. (line 375) * AC_MINGW32: Obsolete Macros. (line 381) * AC_MINIX: Obsolete Macros. (line 387) * AC_MINUS_C_MINUS_O: Obsolete Macros. (line 391) * AC_MMAP: Obsolete Macros. (line 394) * AC_MODE_T: Obsolete Macros. (line 397) * AC_MSG_CHECKING: Printing Messages. (line 24) * AC_MSG_ERROR: Printing Messages. 
(line 55) * AC_MSG_FAILURE: Printing Messages. (line 65) * AC_MSG_NOTICE: Printing Messages. (line 45) * AC_MSG_RESULT: Printing Messages. (line 35) * AC_MSG_WARN: Printing Messages. (line 71) * AC_OBJEXT: Obsolete Macros. (line 400) * AC_OBSOLETE: Obsolete Macros. (line 406) * AC_OFF_T: Obsolete Macros. (line 421) * AC_OPENMP: Generic Compiler Characteristics. (line 64) * AC_OUTPUT: Output. (line 13) * AC_OUTPUT <1>: Obsolete Macros. (line 424) * AC_OUTPUT_COMMANDS: Obsolete Macros. (line 436) * AC_PACKAGE_BUGREPORT: Initializing configure. (line 117) * AC_PACKAGE_NAME: Initializing configure. (line 105) * AC_PACKAGE_STRING: Initializing configure. (line 114) * AC_PACKAGE_TARNAME: Initializing configure. (line 108) * AC_PACKAGE_URL: Initializing configure. (line 121) * AC_PACKAGE_VERSION: Initializing configure. (line 111) * AC_PATH_PROG: Generic Programs. (line 108) * AC_PATH_PROGS: Generic Programs. (line 115) * AC_PATH_PROGS_FEATURE_CHECK: Generic Programs. (line 123) * AC_PATH_TARGET_TOOL: Generic Programs. (line 159) * AC_PATH_TOOL: Generic Programs. (line 164) * AC_PATH_X: System Services. (line 10) * AC_PATH_XTRA: System Services. (line 30) * AC_PID_T: Obsolete Macros. (line 466) * AC_PREFIX: Obsolete Macros. (line 469) * AC_PREFIX_DEFAULT: Default Prefix. (line 16) * AC_PREFIX_PROGRAM: Default Prefix. (line 25) * AC_PREPROC_IFELSE: Running the Preprocessor. (line 20) * AC_PREREQ: Versioning. (line 11) * AC_PRESERVE_HELP_ORDER: Help Formatting. (line 20) * AC_PROGRAMS_CHECK: Obsolete Macros. (line 481) * AC_PROGRAMS_PATH: Obsolete Macros. (line 484) * AC_PROGRAM_CHECK: Obsolete Macros. (line 487) * AC_PROGRAM_EGREP: Obsolete Macros. (line 490) * AC_PROGRAM_PATH: Obsolete Macros. (line 493) * AC_PROG_AWK: Particular Programs. (line 10) * AC_PROG_CC: C Compiler. (line 61) * AC_PROG_CC_C89: Obsolete Macros. (line 472) * AC_PROG_CC_C99: Obsolete Macros. (line 475) * AC_PROG_CC_C_O: C Compiler. (line 127) * AC_PROG_CC_STDC: Obsolete Macros. 
(line 478) * AC_PROG_CPP: C Compiler. (line 138) * AC_PROG_CPP_WERROR: C Compiler. (line 153) * AC_PROG_CXX: C++ Compiler. (line 7) * AC_PROG_CXXCPP: C++ Compiler. (line 66) * AC_PROG_CXX_C_O: C++ Compiler. (line 83) * AC_PROG_EGREP: Particular Programs. (line 29) * AC_PROG_F77: Fortran Compiler. (line 19) * AC_PROG_F77_C_O: Fortran Compiler. (line 75) * AC_PROG_FC: Fortran Compiler. (line 44) * AC_PROG_FC_C_O: Fortran Compiler. (line 75) * AC_PROG_FGREP: Particular Programs. (line 36) * AC_PROG_GCC_TRADITIONAL: C Compiler. (line 358) * AC_PROG_GREP: Particular Programs. (line 20) * AC_PROG_INSTALL: Particular Programs. (line 43) * AC_PROG_LEX: Particular Programs. (line 114) * AC_PROG_LN_S: Particular Programs. (line 209) * AC_PROG_MAKE_SET: Output. (line 45) * AC_PROG_MKDIR_P: Particular Programs. (line 80) * AC_PROG_OBJC: Objective C Compiler. (line 7) * AC_PROG_OBJCPP: Objective C Compiler. (line 27) * AC_PROG_OBJCXX: Objective C++ Compiler. (line 7) * AC_PROG_OBJCXXCPP: Objective C++ Compiler. (line 27) * AC_PROG_RANLIB: Particular Programs. (line 228) * AC_PROG_SED: Particular Programs. (line 232) * AC_PROG_YACC: Particular Programs. (line 241) * AC_REMOTE_TAPE: Obsolete Macros. (line 496) * AC_REPLACE_FNMATCH: Particular Functions. (line 482) * AC_REPLACE_FUNCS: Generic Functions. (line 117) * AC_REQUIRE: Prerequisite Macros. (line 17) * AC_REQUIRE_AUX_FILE: Input. (line 87) * AC_REQUIRE_CPP: Language Choice. (line 94) * AC_RESTARTABLE_SYSCALLS: Obsolete Macros. (line 499) * AC_RETSIGTYPE: Obsolete Macros. (line 507) * AC_REVISION: Notices. (line 18) * AC_RSH: Obsolete Macros. (line 511) * AC_RUN_IFELSE: Runtime. (line 20) * AC_SCO_INTL: Obsolete Macros. (line 514) * AC_SEARCH_LIBS: Libraries. (line 52) * AC_SETVBUF_REVERSED: Obsolete Macros. (line 523) * AC_SET_MAKE: Obsolete Macros. (line 528) * AC_SIZEOF_TYPE: Obsolete Macros. (line 531) * AC_SIZE_T: Obsolete Macros. (line 534) * AC_STAT_MACROS_BROKEN: Obsolete Macros. 
(line 537) * AC_STDC_HEADERS: Obsolete Macros. (line 540) * AC_STRCOLL: Obsolete Macros. (line 545) * AC_STRUCT_DIRENT_D_INO: Particular Structures. (line 9) * AC_STRUCT_DIRENT_D_TYPE: Particular Structures. (line 21) * AC_STRUCT_ST_BLKSIZE: Obsolete Macros. (line 548) * AC_STRUCT_ST_BLOCKS: Particular Structures. (line 26) * AC_STRUCT_ST_RDEV: Obsolete Macros. (line 557) * AC_STRUCT_TIMEZONE: Particular Structures. (line 43) * AC_STRUCT_TM: Particular Structures. (line 35) * AC_ST_BLKSIZE: Obsolete Macros. (line 566) * AC_ST_BLOCKS: Obsolete Macros. (line 569) * AC_ST_RDEV: Obsolete Macros. (line 572) * AC_SUBST: Setting Output Variables. (line 13) * AC_SUBST_FILE: Setting Output Variables. (line 39) * AC_SYS_INTERPRETER: System Services. (line 41) * AC_SYS_LARGEFILE: System Services. (line 48) * AC_SYS_LONG_FILE_NAMES: System Services. (line 72) * AC_SYS_POSIX_TERMIOS: System Services. (line 76) * AC_SYS_RESTARTABLE_SYSCALLS: Obsolete Macros. (line 575) * AC_SYS_SIGLIST_DECLARED: Obsolete Macros. (line 590) * AC_TEST_CPP: Obsolete Macros. (line 595) * AC_TEST_PROGRAM: Obsolete Macros. (line 599) * AC_TIMEZONE: Obsolete Macros. (line 603) * AC_TIME_WITH_SYS_TIME: Obsolete Macros. (line 606) * AC_TRY_ACT: AC_ACT_IFELSE vs AC_TRY_ACT. (line 6) * AC_TRY_COMPILE: Obsolete Macros. (line 612) * AC_TRY_CPP: Obsolete Macros. (line 631) * AC_TRY_LINK: Obsolete Macros. (line 644) * AC_TRY_LINK_FUNC: Obsolete Macros. (line 673) * AC_TRY_RUN: Obsolete Macros. (line 680) * AC_TYPE_GETGROUPS: Particular Types. (line 14) * AC_TYPE_INT16_T: Particular Types. (line 40) * AC_TYPE_INT32_T: Particular Types. (line 43) * AC_TYPE_INT64_T: Particular Types. (line 46) * AC_TYPE_INT8_T: Particular Types. (line 21) * AC_TYPE_INTMAX_T: Particular Types. (line 49) * AC_TYPE_INTPTR_T: Particular Types. (line 54) * AC_TYPE_LONG_DOUBLE: Particular Types. (line 59) * AC_TYPE_LONG_DOUBLE_WIDER: Particular Types. (line 70) * AC_TYPE_LONG_LONG_INT: Particular Types. 
(line 78) * AC_TYPE_MBSTATE_T: Particular Types. (line 88) * AC_TYPE_MODE_T: Particular Types. (line 96) * AC_TYPE_OFF_T: Particular Types. (line 102) * AC_TYPE_PID_T: Particular Types. (line 108) * AC_TYPE_SIGNAL: Obsolete Macros. (line 691) * AC_TYPE_SIZE_T: Particular Types. (line 114) * AC_TYPE_SSIZE_T: Particular Types. (line 120) * AC_TYPE_UID_T: Particular Types. (line 126) * AC_TYPE_UINT16_T: Particular Types. (line 138) * AC_TYPE_UINT32_T: Particular Types. (line 141) * AC_TYPE_UINT64_T: Particular Types. (line 144) * AC_TYPE_UINT8_T: Particular Types. (line 132) * AC_TYPE_UINTMAX_T: Particular Types. (line 147) * AC_TYPE_UINTPTR_T: Particular Types. (line 152) * AC_TYPE_UNSIGNED_LONG_LONG_INT: Particular Types. (line 157) * AC_UID_T: Obsolete Macros. (line 708) * AC_UNISTD_H: Obsolete Macros. (line 711) * AC_USE_SYSTEM_EXTENSIONS: C and Posix Variants. (line 11) * AC_USG: Obsolete Macros. (line 717) * AC_UTIME_NULL: Obsolete Macros. (line 728) * AC_VALIDATE_CACHED_SYSTEM_TUPLE: Obsolete Macros. (line 731) * AC_VERBOSE: Obsolete Macros. (line 736) * AC_VFORK: Obsolete Macros. (line 739) * AC_VPRINTF: Obsolete Macros. (line 742) * AC_WAIT3: Obsolete Macros. (line 745) * AC_WARN: Obsolete Macros. (line 750) * AC_WARNING: Obsolete Macros. (line 753) * AC_WITH: Obsolete Macros. (line 756) * AC_WORDS_BIGENDIAN: Obsolete Macros. (line 760) * AC_XENIX_DIR: Obsolete Macros. (line 763) * AC_YYTEXT_POINTER: Obsolete Macros. (line 780) * AH_BOTTOM: Autoheader Macros. (line 46) * AH_HEADER: Configuration Headers. (line 58) * AH_TEMPLATE: Autoheader Macros. (line 19) * AH_TOP: Autoheader Macros. (line 43) * AH_VERBATIM: Autoheader Macros. (line 36) * AU_ALIAS: Obsoleting Macros. (line 49) * AU_DEFUN: Obsoleting Macros. (line 18)  File: autoconf.info, Node: M4 Macro Index, Next: Autotest Macro Index, Prev: Autoconf Macro Index, Up: Indices B.6 M4 Macro Index ================== This is an alphabetical list of the M4, M4sugar, and M4sh macros. 
[index] * Menu: * __file__: Redefined M4 Macros. (line 41) * __line__: Redefined M4 Macros. (line 41) * __oline__: Redefined M4 Macros. (line 45) * AS_BOURNE_COMPATIBLE: Initialization Macros. (line 7) * AS_BOX: Common Shell Constructs. (line 10) * AS_CASE: Common Shell Constructs. (line 19) * AS_DIRNAME: Common Shell Constructs. (line 31) * AS_ECHO: Common Shell Constructs. (line 39) * AS_ECHO_N: Common Shell Constructs. (line 47) * AS_ESCAPE: Common Shell Constructs. (line 55) * AS_EXECUTABLE_P: Common Shell Constructs. (line 95) * AS_EXIT: Common Shell Constructs. (line 100) * AS_HELP_STRING: Pretty Help Strings. (line 15) * AS_IF: Common Shell Constructs. (line 106) * AS_INIT: Initialization Macros. (line 14) * AS_INIT_GENERATED: Initialization Macros. (line 26) * AS_LINENO_PREPARE: Initialization Macros. (line 67) * AS_LITERAL_IF: Polymorphic Variables. (line 21) * AS_LITERAL_WORD_IF: Polymorphic Variables. (line 21) * AS_MESSAGE_FD: File Descriptor Macros. (line 17) * AS_MESSAGE_LOG_FD: File Descriptor Macros. (line 29) * AS_ME_PREPARE: Initialization Macros. (line 72) * AS_MKDIR_P: Common Shell Constructs. (line 124) * AS_ORIGINAL_STDIN_FD: File Descriptor Macros. (line 39) * AS_SET_CATFILE: Common Shell Constructs. (line 164) * AS_SET_STATUS: Common Shell Constructs. (line 136) * AS_SHELL_SANITIZE: Initialization Macros. (line 101) * AS_TMPDIR: Initialization Macros. (line 77) * AS_TR_CPP: Common Shell Constructs. (line 144) * AS_TR_SH: Common Shell Constructs. (line 153) * AS_UNSET: Common Shell Constructs. (line 168) * AS_VAR_APPEND: Polymorphic Variables. (line 62) * AS_VAR_ARITH: Polymorphic Variables. (line 83) * AS_VAR_COPY: Polymorphic Variables. (line 100) * AS_VAR_IF: Polymorphic Variables. (line 119) * AS_VAR_POPDEF: Polymorphic Variables. (line 128) * AS_VAR_PUSHDEF: Polymorphic Variables. (line 128) * AS_VAR_SET: Polymorphic Variables. (line 170) * AS_VAR_SET_IF: Polymorphic Variables. (line 180) * AS_VAR_TEST_SET: Polymorphic Variables. 
(line 185) * AS_VERSION_COMPARE: Common Shell Constructs. (line 174) * dnl: Redefined M4 Macros. (line 52) * m4_append: Text processing Macros. (line 16) * m4_append_uniq: Text processing Macros. (line 16) * m4_append_uniq_w: Text processing Macros. (line 69) * m4_apply: Evaluation Macros. (line 10) * m4_argn: Looping constructs. (line 29) * m4_assert: Diagnostic Macros. (line 11) * m4_bmatch: Conditional constructs. (line 11) * m4_bpatsubst: Redefined M4 Macros. (line 55) * m4_bpatsubsts: Conditional constructs. (line 18) * m4_bregexp: Redefined M4 Macros. (line 60) * m4_builtin: Redefined M4 Macros. (line 6) * m4_car: Looping constructs. (line 35) * m4_case: Conditional constructs. (line 33) * m4_cdr: Looping constructs. (line 41) * m4_changecom: Redefined M4 Macros. (line 6) * m4_changequote: Redefined M4 Macros. (line 6) * m4_chomp: Text processing Macros. (line 80) * m4_chomp_all: Text processing Macros. (line 80) * m4_cleardivert: Diversion support. (line 116) * m4_cmp: Number processing Macros. (line 11) * m4_combine: Text processing Macros. (line 88) * m4_cond: Conditional constructs. (line 42) * m4_copy: Redefined M4 Macros. (line 68) * m4_copy_force: Redefined M4 Macros. (line 68) * m4_count: Evaluation Macros. (line 26) * m4_curry: Evaluation Macros. (line 30) * m4_debugfile: Redefined M4 Macros. (line 6) * m4_debugmode: Redefined M4 Macros. (line 6) * m4_decr: Redefined M4 Macros. (line 6) * m4_default: Conditional constructs. (line 73) * m4_default_nblank: Conditional constructs. (line 73) * m4_default_nblank_quoted: Conditional constructs. (line 73) * m4_default_quoted: Conditional constructs. (line 73) * m4_define: Redefined M4 Macros. (line 6) * m4_define_default: Conditional constructs. (line 122) * m4_defn: Redefined M4 Macros. (line 87) * m4_divert: Redefined M4 Macros. (line 95) * m4_divert_once: Diversion support. (line 119) * m4_divert_pop: Diversion support. (line 124) * m4_divert_push: Diversion support. 
(line 130) * m4_divert_text: Diversion support. (line 136) * m4_divnum: Redefined M4 Macros. (line 6) * m4_do: Evaluation Macros. (line 45) * m4_dquote: Evaluation Macros. (line 65) * m4_dquote_elt: Evaluation Macros. (line 70) * m4_dumpdef: Redefined M4 Macros. (line 107) * m4_dumpdefs: Redefined M4 Macros. (line 107) * m4_echo: Evaluation Macros. (line 75) * m4_errprint: Redefined M4 Macros. (line 6) * m4_errprintn: Diagnostic Macros. (line 16) * m4_escape: Text processing Macros. (line 108) * m4_esyscmd: Redefined M4 Macros. (line 6) * m4_esyscmd_s: Redefined M4 Macros. (line 124) * m4_eval: Redefined M4 Macros. (line 6) * m4_exit: Redefined M4 Macros. (line 130) * m4_expand: Evaluation Macros. (line 79) * m4_fatal: Diagnostic Macros. (line 20) * m4_flatten: Text processing Macros. (line 113) * m4_for: Looping constructs. (line 59) * m4_foreach: Looping constructs. (line 69) * m4_foreach_w: Looping constructs. (line 83) * m4_format: Redefined M4 Macros. (line 6) * m4_if: Redefined M4 Macros. (line 136) * m4_ifblank: Conditional constructs. (line 127) * m4_ifdef: Redefined M4 Macros. (line 6) * m4_ifnblank: Conditional constructs. (line 127) * m4_ifndef: Conditional constructs. (line 135) * m4_ifset: Conditional constructs. (line 139) * m4_ifval: Conditional constructs. (line 145) * m4_ifvaln: Conditional constructs. (line 150) * m4_ignore: Evaluation Macros. (line 129) * m4_include: Redefined M4 Macros. (line 143) * m4_incr: Redefined M4 Macros. (line 6) * m4_index: Redefined M4 Macros. (line 6) * m4_indir: Redefined M4 Macros. (line 6) * m4_init: Diversion support. (line 161) * m4_join: Text processing Macros. (line 119) * m4_joinall: Text processing Macros. (line 119) * m4_len: Redefined M4 Macros. (line 6) * m4_list_cmp: Number processing Macros. (line 16) * m4_location: Diagnostic Macros. (line 24) * m4_maketemp: Redefined M4 Macros. (line 147) * m4_make_list: Evaluation Macros. (line 142) * m4_map: Looping constructs. 
(line 93) * m4_mapall: Looping constructs. (line 93) * m4_mapall_sep: Looping constructs. (line 93) * m4_map_args: Looping constructs. (line 130) * m4_map_args_pair: Looping constructs. (line 166) * m4_map_args_sep: Looping constructs. (line 178) * m4_map_args_w: Looping constructs. (line 189) * m4_map_sep: Looping constructs. (line 93) * m4_max: Number processing Macros. (line 38) * m4_min: Number processing Macros. (line 42) * m4_mkstemp: Redefined M4 Macros. (line 147) * m4_n: Conditional constructs. (line 154) * m4_newline: Text processing Macros. (line 134) * m4_normalize: Text processing Macros. (line 140) * m4_pattern_allow: Forbidden Patterns. (line 53) * m4_pattern_forbid: Forbidden Patterns. (line 17) * m4_popdef: Redefined M4 Macros. (line 157) * m4_pushdef: Redefined M4 Macros. (line 6) * m4_quote: Evaluation Macros. (line 161) * m4_rename: Redefined M4 Macros. (line 68) * m4_rename_force: Redefined M4 Macros. (line 68) * m4_reverse: Evaluation Macros. (line 167) * m4_re_escape: Text processing Macros. (line 148) * m4_set_add: Set manipulation Macros. (line 19) * m4_set_add_all: Set manipulation Macros. (line 25) * m4_set_contains: Set manipulation Macros. (line 29) * m4_set_contents: Set manipulation Macros. (line 49) * m4_set_delete: Set manipulation Macros. (line 79) * m4_set_difference: Set manipulation Macros. (line 85) * m4_set_dump: Set manipulation Macros. (line 49) * m4_set_empty: Set manipulation Macros. (line 107) * m4_set_foreach: Set manipulation Macros. (line 113) * m4_set_intersection: Set manipulation Macros. (line 85) * m4_set_list: Set manipulation Macros. (line 134) * m4_set_listc: Set manipulation Macros. (line 134) * m4_set_map: Set manipulation Macros. (line 169) * m4_set_map_sep: Set manipulation Macros. (line 182) * m4_set_remove: Set manipulation Macros. (line 193) * m4_set_size: Set manipulation Macros. (line 204) * m4_set_union: Set manipulation Macros. (line 85) * m4_shift: Redefined M4 Macros. 
(line 6) * m4_shift2: Looping constructs. (line 199) * m4_shift3: Looping constructs. (line 199) * m4_shiftn: Looping constructs. (line 199) * m4_sign: Number processing Macros. (line 46) * m4_sinclude: Redefined M4 Macros. (line 143) * m4_split: Text processing Macros. (line 152) * m4_stack_foreach: Looping constructs. (line 207) * m4_stack_foreach_lifo: Looping constructs. (line 207) * m4_stack_foreach_sep: Looping constructs. (line 229) * m4_stack_foreach_sep_lifo: Looping constructs. (line 229) * m4_strip: Text processing Macros. (line 158) * m4_substr: Redefined M4 Macros. (line 6) * m4_syscmd: Redefined M4 Macros. (line 6) * m4_sysval: Redefined M4 Macros. (line 6) * m4_text_box: Text processing Macros. (line 166) * m4_text_wrap: Text processing Macros. (line 180) * m4_tolower: Text processing Macros. (line 211) * m4_toupper: Text processing Macros. (line 211) * m4_traceoff: Redefined M4 Macros. (line 6) * m4_traceon: Redefined M4 Macros. (line 6) * m4_translit: Redefined M4 Macros. (line 6) * m4_undefine: Redefined M4 Macros. (line 161) * m4_undivert: Redefined M4 Macros. (line 169) * m4_unquote: Evaluation Macros. (line 176) * m4_version_compare: Number processing Macros. (line 50) * m4_version_prereq: Number processing Macros. (line 90) * m4_warn: Diagnostic Macros. (line 28) * m4_wrap: Redefined M4 Macros. (line 179) * m4_wrap_lifo: Redefined M4 Macros. (line 179)  File: autoconf.info, Node: Autotest Macro Index, Next: Program & Function Index, Prev: M4 Macro Index, Up: Indices B.7 Autotest Macro Index ======================== This is an alphabetical list of the Autotest macros. [index] * Menu: * AT_ARG_OPTION: Writing Testsuites. (line 50) * AT_ARG_OPTION_ARG: Writing Testsuites. (line 79) * AT_AT_PREPARE_EACH_TEST: Writing Testsuites. (line 145) * AT_BANNER: Writing Testsuites. (line 160) * AT_CAPTURE_FILE: Writing Testsuites. (line 190) * AT_CHECK: Writing Testsuites. (line 250) * AT_CHECK_EUNIT: Writing Testsuites. 
(line 355) * AT_CHECK_UNQUOTED: Writing Testsuites. (line 250) * AT_CLEANUP: Writing Testsuites. (line 231) * AT_COLOR_TESTS: Writing Testsuites. (line 105) * AT_COPYRIGHT: Writing Testsuites. (line 41) * AT_DATA: Writing Testsuites. (line 236) * AT_FAIL_IF: Writing Testsuites. (line 194) * AT_INIT: Writing Testsuites. (line 31) * AT_KEYWORDS: Writing Testsuites. (line 177) * AT_PACKAGE_BUGREPORT: Making testsuite Scripts. (line 13) * AT_PACKAGE_NAME: Making testsuite Scripts. (line 13) * AT_PACKAGE_STRING: Making testsuite Scripts. (line 13) * AT_PACKAGE_TARNAME: Making testsuite Scripts. (line 13) * AT_PACKAGE_URL: Making testsuite Scripts. (line 13) * AT_PACKAGE_VERSION: Making testsuite Scripts. (line 13) * AT_PREPARE_TESTS: Writing Testsuites. (line 129) * AT_SETUP: Writing Testsuites. (line 170) * AT_SKIP_IF: Writing Testsuites. (line 208) * AT_TESTED: Writing Testsuites. (line 109) * AT_XFAIL_IF: Writing Testsuites. (line 223)  File: autoconf.info, Node: Program & Function Index, Next: Concept Index, Prev: Autotest Macro Index, Up: Indices B.8 Program and Function Index ============================== This is an alphabetical list of the programs and functions whose portability is discussed in this document. [index] * Menu: * !: Limitations of Builtins. (line 41) * .: Limitations of Builtins. (line 17) * /usr/bin/ksh on Solaris: Shellology. (line 64) * /usr/dt/bin/dtksh on Solaris: Shellology. (line 66) * /usr/xpg4/bin/sh on Solaris: Shellology. (line 65) * {...}: Limitations of Builtins. (line 74) * alloca: Particular Functions. (line 10) * alloca.h: Particular Functions. (line 10) * assert.h: Default Includes. (line 6) * assert.h <1>: Particular Headers. (line 20) * awk: Limitations of Usual Tools. (line 10) * basename: Limitations of Usual Tools. (line 141) * break: Limitations of Builtins. (line 107) * case: Limitations of Builtins. (line 110) * cat: Limitations of Usual Tools. (line 145) * cc: Limitations of Usual Tools. 
(line 148) * cd: Limitations of Builtins. (line 203) * chgrp: Limitations of Usual Tools. (line 179) * chmod: Limitations of Usual Tools. (line 183) * chown: Particular Functions. (line 48) * chown <1>: Limitations of Usual Tools. (line 179) * closedir: Particular Functions. (line 58) * cmp: Limitations of Usual Tools. (line 193) * config.guess: Input. (line 58) * config.guess <1>: Manual Configuration. (line 13) * config.sub: Input. (line 58) * config.sub <1>: Manual Configuration. (line 13) * cp: Limitations of Usual Tools. (line 200) * ctype.h: Default Includes. (line 6) * date: Limitations of Usual Tools. (line 258) * diff: Limitations of Usual Tools. (line 268) * dirent.h: Particular Headers. (line 25) * dirname: Limitations of Usual Tools. (line 274) * echo: Limitations of Builtins. (line 233) * egrep: Limitations of Usual Tools. (line 281) * errno.h: Default Includes. (line 6) * error_at_line: Particular Functions. (line 73) * eval: Limitations of Builtins. (line 265) * exec: Limitations of Builtins. (line 308) * exit: Function Portability. (line 17) * exit <1>: Limitations of Builtins. (line 348) * export: Limitations of Builtins. (line 373) * expr: Limitations of Usual Tools. (line 306) * expr <1>: Limitations of Usual Tools. (line 339) * expr (|): Limitations of Usual Tools. (line 320) * false: Limitations of Builtins. (line 444) * fgrep: Limitations of Usual Tools. (line 429) * find: Limitations of Usual Tools. (line 438) * float.h: Default Includes. (line 6) * fnmatch: Particular Functions. (line 83) * fnmatch <1>: Particular Functions. (line 98) * fnmatch <2>: Particular Functions. (line 482) * fnmatch.h: Particular Functions. (line 482) * for: Limitations of Builtins. (line 448) * fork: Particular Functions. (line 109) * free: Function Portability. (line 27) * fseeko: Particular Functions. (line 133) * ftello: Particular Functions. (line 133) * getgroups: Particular Functions. (line 145) * getloadavg: Particular Functions. 
(line 157) * getmntent: Particular Functions. (line 191) * getpgid: Particular Functions. (line 204) * getpgrp: Particular Functions. (line 204) * grep: Limitations of Usual Tools. (line 452) * if: Limitations of Builtins. (line 526) * install-sh: Input. (line 58) * install-sh <1>: Particular Programs. (line 43) * install-sh <2>: Particular Programs. (line 80) * inttypes.h: Header Portability. (line 57) * inttypes.h <1>: Particular Types. (line 6) * isinf: Function Portability. (line 32) * isnan: Function Portability. (line 32) * iso646.h: Default Includes. (line 6) * join: Limitations of Usual Tools. (line 519) * ksh: Shellology. (line 56) * ksh88: Shellology. (line 56) * ksh93: Shellology. (line 56) * limits.h: Default Includes. (line 6) * linux/irda.h: Header Portability. (line 65) * linux/random.h: Header Portability. (line 68) * ln: Limitations of Usual Tools. (line 536) * locale.h: Default Includes. (line 6) * ls: Limitations of Usual Tools. (line 550) * lstat: Particular Functions. (line 227) * lstat <1>: Particular Functions. (line 382) * make: Portable Make. (line 6) * malloc: Function Portability. (line 81) * malloc <1>: Particular Functions. (line 246) * math.h: Default Includes. (line 6) * mbrtowc: Particular Functions. (line 281) * memcmp: Particular Functions. (line 292) * memory.h: Header Portability. (line 46) * mkdir: Limitations of Usual Tools. (line 572) * mkfifo: Limitations of Usual Tools. (line 606) * mknod: Limitations of Usual Tools. (line 606) * mktemp: Limitations of Usual Tools. (line 616) * mktime: Particular Functions. (line 305) * mmap: Particular Functions. (line 317) * mv: Limitations of Usual Tools. (line 641) * ndir.h: Particular Headers. (line 25) * net/if.h: Header Portability. (line 71) * netinet/if_ether.h: Header Portability. (line 74) * nlist.h: Particular Functions. (line 174) * od: Limitations of Usual Tools. (line 673) * pdksh: Shellology. (line 78) * printf: Limitations of Builtins. 
(line 565) * putenv: Function Portability. (line 88) * pwd: Limitations of Builtins. (line 598) * read: Limitations of Builtins. (line 627) * realloc: Function Portability. (line 104) * realloc <1>: Particular Functions. (line 339) * resolv.h: Particular Headers. (line 99) * rm: Limitations of Usual Tools. (line 697) * rmdir: Limitations of Usual Tools. (line 716) * sed: Limitations of Usual Tools. (line 720) * sed (t): Limitations of Usual Tools. (line 926) * select: Particular Functions. (line 353) * set: Limitations of Builtins. (line 633) * setjmp.h: Default Includes. (line 6) * setpgrp: Particular Functions. (line 364) * setvbuf: Obsolete Macros. (line 214) * shift: Limitations of Builtins. (line 784) * sigaction: Function Portability. (line 109) * signal: Function Portability. (line 109) * signal.h: Default Includes. (line 6) * signal.h <1>: Obsolete Macros. (line 691) * sleep: Limitations of Usual Tools. (line 986) * snprintf: Function Portability. (line 123) * sort: Limitations of Usual Tools. (line 992) * source: Limitations of Builtins. (line 792) * sprintf: Function Portability. (line 133) * sscanf: Function Portability. (line 139) * stat: Particular Functions. (line 382) * stdarg.h: Default Includes. (line 6) * stdbool.h: Particular Headers. (line 10) * stdbool.h <1>: Particular Headers. (line 127) * stddef.h: Default Includes. (line 6) * stdint.h: Header Portability. (line 57) * stdint.h <1>: Particular Types. (line 6) * stdio.h: Default Includes. (line 6) * stdlib.h: Default Includes. (line 6) * stdlib.h <1>: Particular Types. (line 6) * strcoll: Particular Functions. (line 398) * strerror_r: Function Portability. (line 147) * strerror_r <1>: Particular Functions. (line 408) * strftime: Particular Functions. (line 424) * string.h: Default Includes. (line 6) * strings.h: Header Portability. (line 49) * strnlen: Function Portability. (line 153) * strnlen <1>: Particular Functions. (line 453) * strtod: Particular Functions. 
(line 431) * strtold: Particular Functions. (line 443) * sys/dir.h: Particular Headers. (line 25) * sys/ioctl.h: Particular Headers. (line 212) * sys/mkdev.h: Particular Headers. (line 68) * sys/mount.h: Header Portability. (line 78) * sys/ndir.h: Particular Headers. (line 25) * sys/ptem.h: Header Portability. (line 81) * sys/socket.h: Header Portability. (line 84) * sys/stat.h: Particular Headers. (line 118) * sys/sysmacros.h: Particular Headers. (line 68) * sys/time.h: Particular Structures. (line 35) * sys/time.h <1>: Obsolete Macros. (line 267) * sys/types.h: Particular Types. (line 6) * sys/ucred.h: Header Portability. (line 87) * sys/wait.h: Particular Headers. (line 172) * sysconf: Function Portability. (line 168) * tar: Limitations of Usual Tools. (line 997) * termios.h: Particular Headers. (line 212) * test: Limitations of Builtins. (line 796) * time.h: Default Includes. (line 6) * time.h <1>: Particular Structures. (line 35) * time.h <2>: Obsolete Macros. (line 267) * touch: Limitations of Usual Tools. (line 1002) * tr: Limitations of Usual Tools. (line 1015) * trap: Limitations of Builtins. (line 910) * true: Limitations of Builtins. (line 996) * unistd.h: Particular Headers. (line 196) * unlink: Function Portability. (line 172) * unset: Limitations of Builtins. (line 1012) * unsetenv: Function Portability. (line 178) * utime: Particular Functions. (line 463) * va_copy: Function Portability. (line 183) * va_list: Function Portability. (line 190) * vfork: Particular Functions. (line 109) * vfork.h: Particular Functions. (line 109) * vprintf: Particular Functions. (line 473) * vsnprintf: Function Portability. (line 123) * vsprintf: Function Portability. (line 133) * vsprintf <1>: Particular Functions. (line 473) * wait: Limitations of Builtins. (line 1039) * wait3: Obsolete Macros. (line 222) * wchar.h: Default Includes. (line 6) * wchar.h <1>: Particular Types. (line 88) * wctype.h: Default Includes. 
(line 6) * X11/extensions/scrnsaver.h: Header Portability. (line 90)  File: autoconf.info, Node: Concept Index, Prev: Program & Function Index, Up: Indices B.9 Concept Index ================= This is an alphabetical list of the files, tools, and concepts introduced in this document. [index] * Menu: * "$@": Shell Substitutions. (line 70) * $((EXPRESSION)): Shell Substitutions. (line 467) * $(COMMANDS): Shell Substitutions. (line 435) * $<, explicit rules, and VPATH: $< in Explicit Rules. (line 6) * ${#VAR}: Shell Substitutions. (line 381) * ${VAR##WORD}: Shell Substitutions. (line 381) * ${VAR#WORD}: Shell Substitutions. (line 381) * ${VAR%%WORD}: Shell Substitutions. (line 381) * ${VAR%WORD}: Shell Substitutions. (line 381) * ${VAR+VALUE}: Shell Substitutions. (line 157) * ${VAR-VALUE}: Shell Substitutions. (line 157) * ${VAR=EXPANDED-VALUE}: Shell Substitutions. (line 332) * ${VAR=LITERAL}: Shell Substitutions. (line 308) * ${VAR=VALUE}: Shell Substitutions. (line 157) * ${VAR=VALUE} <1>: Shell Substitutions. (line 236) * ${VAR?VALUE}: Shell Substitutions. (line 157) * 64-bit libraries: Site Defaults. (line 98) * @&t@: Quadrigraphs. (line 6) * @S|@: Quadrigraphs. (line 6) * ^ quoting: Shell Substitutions. (line 506) * _m4_divert_diversion: New Macros. (line 6) * `COMMANDS`: Shell Substitutions. (line 389) * absolute file names, detect: File System Conventions. (line 51) * abs_builddir: Preset Output Variables. (line 179) * abs_srcdir: Preset Output Variables. (line 201) * abs_top_builddir: Preset Output Variables. (line 194) * abs_top_srcdir: Preset Output Variables. (line 208) * acconfig.h: acconfig Header. (line 6) * aclocal.m4: Making configure Scripts. (line 6) * ac_aux_dir: Input. (line 87) * ac_objext: Generic Functions. (line 59) * ac_path_VARIABLE: Generic Programs. (line 123) * ac_path_VARIABLE_found: Generic Programs. (line 123) * ac_srcdir: Configuration Actions. (line 85) * ac_top_build_prefix: Configuration Actions. 
(line 80) * ac_top_srcdir: Configuration Actions. (line 76) * Ash: Shellology. (line 16) * at_arg_OPTION: Writing Testsuites. (line 50) * at_arg_OPTION <1>: Writing Testsuites. (line 79) * at_optarg: Writing Testsuites. (line 62) * at_optarg <1>: Writing Testsuites. (line 90) * at_optarg_OPTION: Writing Testsuites. (line 62) * at_status: Writing Testsuites. (line 250) * autoconf: autoconf Invocation. (line 6) * Autoconf upgrading: Autoconf 1. (line 6) * Autoconf upgrading <1>: Autoconf 2.13. (line 6) * Autoconf version: Versioning. (line 6) * autoheader: autoheader Invocation. (line 6) * Autoheader macros: Autoheader Macros. (line 6) * autom4te debugging tips: Debugging via autom4te. (line 6) * Autom4te Library: autom4te Invocation. (line 220) * autom4te.cache: autom4te Invocation. (line 126) * autom4te.cfg: autom4te Invocation. (line 252) * Automake: Automake. (line 19) * Automatic remaking: Automatic Remaking. (line 6) * automatic rule rewriting and VPATH: Automatic Rule Rewriting. (line 6) * autopoint: autoreconf Invocation. (line 31) * autoreconf: autoreconf Invocation. (line 6) * autoscan: autoscan Invocation. (line 6) * Autotest: Using Autotest. (line 6) * AUTOTEST_PATH: testsuite Invocation. (line 59) * autoupdate: autoupdate Invocation. (line 6) * balancing parentheses: Balancing Parentheses. (line 6) * Bash: Shellology. (line 43) * Bash 2.05 and later: Shellology. (line 48) * bindir: Installation Directory Variables. (line 14) * Bootstrap: Bootstrapping. (line 6) * BSD make and obj/: obj/ and Make. (line 6) * buffer overruns: Buffer Overruns. (line 6) * Build directories: Build Directories. (line 6) * builddir: Preset Output Variables. (line 176) * C function portability: Function Portability. (line 6) * C types: Types. (line 6) * Cache: Caching Results. (line 6) * Cache variable: Cache Variable Names. (line 6) * Cache, enabling: configure Invocation. (line 25) * Canonical system type: Canonicalizing. 
(line 6) * carriage return, deleting: Limitations of Usual Tools. (line 1015) * CFLAGS: Preset Output Variables. (line 22) * changequote: Changequote is Evil. (line 6) * Coding style: Coding Style. (line 6) * Command Substitution: Shell Substitutions. (line 389) * command-line, macros set on: Command-line Macros and whitespace. (line 6) * Commands for configuration: Configuration Commands. (line 6) * Comments in Makefile macros: Comments in Make Macros. (line 6) * Comments in Makefile rules: Comments in Make Rules. (line 6) * Common autoconf behavior: Common Behavior. (line 6) * Compilers: Compilers and Preprocessors. (line 6) * composing variable names: Polymorphic Variables. (line 128) * config.h: Configuration Headers. (line 6) * config.h.bot: acconfig Header. (line 6) * config.h.in: Header Templates. (line 6) * config.h.top: acconfig Header. (line 6) * config.site: Site Defaults. (line 6) * config.status: config.status Invocation. (line 6) * config.sub: Specifying Target Triplets. (line 72) * Configuration actions: Configuration Actions. (line 6) * Configuration commands: Configuration Commands. (line 6) * Configuration file creation: Configuration Files. (line 6) * Configuration Header: Configuration Headers. (line 6) * Configuration Header Template: Header Templates. (line 6) * Configuration links: Configuration Links. (line 6) * configure: Making configure Scripts. (line 6) * configure <1>: Running configure Scripts. (line 6) * Configure subdirectories: Subdirectories. (line 6) * configure.ac: Making configure Scripts. (line 27) * configure.in: Writing Autoconf Input. (line 19) * configure_input: Preset Output Variables. (line 60) * CONFIG_COMMANDS: Obsolete config.status Use. (line 10) * CONFIG_FILES: Obsolete config.status Use. (line 14) * CONFIG_HEADERS: Obsolete config.status Use. (line 19) * CONFIG_LINKS: Obsolete config.status Use. (line 24) * CONFIG_SHELL: config.status Invocation. (line 100) * CONFIG_STATUS: config.status Invocation. 
(line 106) * Copyright Notice: Notices. (line 10) * Copyright Notice <1>: Writing Testsuites. (line 41) * CPPFLAGS: Preset Output Variables. (line 74) * Creating configuration files: Configuration Files. (line 6) * Creating temporary files: Limitations of Usual Tools. (line 616) * Cross compilation: Hosts and Cross-Compilation. (line 6) * CXXFLAGS: Preset Output Variables. (line 96) * Darwin: Systemology. (line 23) * Data structure, set: Set manipulation Macros. (line 6) * datadir: Installation Directory Variables. (line 17) * datarootdir: Changed Directory Variables. (line 6) * datarootdir <1>: Installation Directory Variables. (line 21) * debugging tips: Debugging via autom4te. (line 6) * Declaration, checking: Declarations. (line 6) * Default includes: Default Includes. (line 6) * DEFS: Preset Output Variables. (line 100) * deleting carriage return: Limitations of Usual Tools. (line 1015) * Dependencies between macros: Dependencies Between Macros. (line 6) * descriptors: File Descriptor Macros. (line 6) * Descriptors: File Descriptors. (line 6) * Directories, build: Build Directories. (line 6) * Directories, installation: Installation Directory Variables. (line 6) * division, integer: Signed Integer Division. (line 6) * dnl: Macro Definitions. (line 50) * dnl <1>: Coding Style. (line 42) * docdir: Installation Directory Variables. (line 25) * double-colon rules and VPATH: VPATH and Double-colon. (line 6) * dvidir: Installation Directory Variables. (line 29) * ECHO_C: Preset Output Variables. (line 108) * ECHO_N: Preset Output Variables. (line 109) * ECHO_T: Preset Output Variables. (line 110) * Endianness: C Compiler. (line 172) * environment, macros set from: Command-line Macros and whitespace. (line 6) * Erlang: Erlang Compiler and Interpreter. (line 6) * Erlang, Library, checking: Erlang Libraries. (line 6) * ERLANG_INSTALL_LIB_DIR: Installation Directory Variables. (line 209) * ERLANG_INSTALL_LIB_DIR_LIBRARY: Installation Directory Variables. 
(line 214) * ERLCFLAGS: Preset Output Variables. (line 122) * exec_prefix: Installation Directory Variables. (line 32) * exiting portably: Exiting Portably. (line 6) * expanded before required: Expanded Before Required. (line 6) * explicit rules, $<, and VPATH: $< in Explicit Rules. (line 6) * External software: External Software. (line 6) * F77: Fortran Compiler. (line 6) * FCFLAGS: Preset Output Variables. (line 128) * FFLAGS: Preset Output Variables. (line 135) * FHS: Site Defaults. (line 84) * file descriptors: File Descriptor Macros. (line 6) * File descriptors: File Descriptors. (line 6) * File system conventions: File System Conventions. (line 6) * File, checking: Files. (line 6) * Filesystem Hierarchy Standard: Site Defaults. (line 84) * floating point: Floating Point Portability. (line 6) * Forbidden patterns: Forbidden Patterns. (line 6) * Fortran: Fortran Compiler. (line 6) * Function, checking: Particular Functions. (line 6) * Gettext: autoreconf Invocation. (line 31) * GNU build system: The GNU Build System. (line 6) * Gnulib: Gnulib. (line 11) * Go: Go Compiler. (line 6) * GOFLAGS: Preset Output Variables. (line 172) * Header portability: Header Portability. (line 6) * Header templates: Header Templates. (line 6) * Header, checking: Header Files. (line 6) * Help strings: Pretty Help Strings. (line 6) * Here-documents: Here-Documents. (line 6) * History of autoconf: History. (line 6) * htmldir: Installation Directory Variables. (line 39) * ifnames: ifnames Invocation. (line 6) * Imake: Why Not Imake. (line 6) * includedir: Installation Directory Variables. (line 42) * Includes, default: Default Includes. (line 6) * indirection, variable name: Polymorphic Variables. (line 6) * infodir: Installation Directory Variables. (line 45) * input: File Descriptor Macros. (line 6) * Install prefix: Default Prefix. (line 6) * Installation directories: Installation Directory Variables. (line 6) * Instantiation: Output. (line 13) * integer overflow: Integer Overflow. 
(line 6) * integer overflow <1>: Integer Overflow Basics. (line 6) * integer overflow <2>: Signed Overflow Examples. (line 6) * integer overflow <3>: Signed Overflow Advice. (line 6) * Introduction: Introduction. (line 6) * invoking the shell: Invoking the Shell. (line 6) * Korn shell: Shellology. (line 56) * Ksh: Shellology. (line 56) * Language: Language Choice. (line 6) * Large file support: System Services. (line 48) * LDFLAGS: Preset Output Variables. (line 142) * LFS: System Services. (line 48) * lib64: Site Defaults. (line 98) * libdir: Installation Directory Variables. (line 48) * libexecdir: Installation Directory Variables. (line 51) * Library, checking: Libraries. (line 6) * LIBS: Preset Output Variables. (line 156) * Libtool: Libtool. (line 13) * License: Distributing. (line 6) * Limitations of make: Portable Make. (line 6) * Limitations of shell builtins: Limitations of Builtins. (line 6) * Limitations of usual tools: Limitations of Usual Tools. (line 6) * Links: Configuration Links. (line 12) * Links for configuration: Configuration Links. (line 6) * Listing directories: Limitations of Usual Tools. (line 550) * localedir: Installation Directory Variables. (line 54) * localstatedir: Installation Directory Variables. (line 59) * loop induction: Optimization and Wraparound. (line 6) * low-level output: File Descriptor Macros. (line 6) * M4: Programming in M4. (line 6) * M4 quotation: M4 Quotation. (line 6) * M4sugar: Programming in M4sugar. (line 6) * m4sugar debugging tips: Debugging via autom4te. (line 6) * Macros, called once: One-Shot Macros. (line 6) * Macros, obsoleting: Obsoleting Macros. (line 6) * Macros, ordering: Suggested Ordering. (line 6) * Macros, prerequisites: Prerequisite Macros. (line 6) * make -k: make -k Status. (line 6) * make and MAKEFLAGS: The Make Macro MAKEFLAGS. (line 6) * make and SHELL: The Make Macro SHELL. (line 6) * Makefile macros and comments: Comments in Make Macros. 
(line 6) * Makefile macros and whitespace: Trailing whitespace in Make Macros. (line 6) * Makefile rules and comments: Comments in Make Rules. (line 6) * Makefile rules and newlines: Newlines in Make Rules. (line 6) * Makefile substitutions: Makefile Substitutions. (line 6) * MAKEFLAGS and make: The Make Macro MAKEFLAGS. (line 6) * Making directories: Limitations of Usual Tools. (line 572) * mandir: Installation Directory Variables. (line 71) * Messages, from configure: Printing Messages. (line 6) * Messages, from M4sugar: Diagnostic Macros. (line 6) * Moving open files: Limitations of Usual Tools. (line 641) * newline, deleting: Limitations of Usual Tools. (line 1015) * Newlines in Makefile rules: Newlines in Make Rules. (line 6) * Notices in configure: Notices. (line 6) * null pointers: Null Pointers. (line 6) * obj/, subdirectory: obj/ and Make. (line 6) * OBJCFLAGS: Preset Output Variables. (line 164) * OBJCXXFLAGS: Preset Output Variables. (line 168) * Obsolete constructs: Obsolete Constructs. (line 6) * Obsoleting macros: Obsoleting Macros. (line 6) * obstack: Particular Functions. (line 329) * oldincludedir: Installation Directory Variables. (line 74) * One-shot macros: One-Shot Macros. (line 6) * Options, package: Package Options. (line 6) * Options, Package: Option Checking. (line 6) * Ordering macros: Suggested Ordering. (line 6) * Output variables: Preset Output Variables. (line 6) * Output variables <1>: Setting Output Variables. (line 6) * Output variables, special characters in: Special Chars in Variables. (line 6) * output, low-level: File Descriptor Macros. (line 6) * Outputting files: Output. (line 6) * overflow, signed integer: Integer Overflow. (line 6) * overflow, signed integer <1>: Integer Overflow Basics. (line 6) * overflow, signed integer <2>: Signed Overflow Examples. (line 6) * overflow, signed integer <3>: Signed Overflow Advice. (line 6) * Package options: Package Options. (line 6) * package.m4: Making testsuite Scripts. 
(line 13) * Parallel make: Parallel Make. (line 6) * parentheses, balancing: Balancing Parentheses. (line 6) * Patterns, forbidden: Forbidden Patterns. (line 6) * pdfdir: Installation Directory Variables. (line 77) * polymorphic variable name: Polymorphic Variables. (line 6) * portability: Varieties of Unportability. (line 6) * Portability of C functions: Function Portability. (line 6) * Portability of headers: Header Portability. (line 6) * Portable C and C++ programming: Portable C and C++. (line 6) * Portable shell programming: Portable Shell. (line 6) * positional parameters: Shell Substitutions. (line 120) * Posix termios headers: System Services. (line 76) * Precious Variable: Setting Output Variables. (line 66) * prefix: Installation Directory Variables. (line 80) * Prefix for install: Default Prefix. (line 6) * preprocessor arithmetic: Preprocessor Arithmetic. (line 6) * Preprocessors: Compilers and Preprocessors. (line 6) * prerequisite directories and VPATH: Tru64 Directory Magic. (line 6) * Prerequisite macros: Prerequisite Macros. (line 6) * Program names, transforming: Transforming Names. (line 6) * Programs, checking: Alternative Programs. (line 6) * psdir: Installation Directory Variables. (line 85) * QNX 4.25: Systemology. (line 37) * quadrigraphs: Quadrigraphs. (line 6) * quotation: Autoconf Language. (line 6) * quotation <1>: M4 Quotation. (line 6) * Remaking automatically: Automatic Remaking. (line 6) * Revision: Notices. (line 18) * Rule, Single Suffix Inference: Single Suffix Rules. (line 6) * runstatedir: Installation Directory Variables. (line 63) * sbindir: Installation Directory Variables. (line 88) * Separated Dependencies: Single Suffix Rules. (line 9) * set -b: Limitations of Builtins. (line 741) * set -e: Limitations of Builtins. (line 658) * set -m: Limitations of Builtins. (line 741) * set -n: Limitations of Builtins. (line 765) * Set manipulation: Set manipulation Macros. (line 6) * sharedstatedir: Installation Directory Variables. 
(line 92) * SHELL and make: The Make Macro SHELL. (line 6) * Shell assignments: Assignments. (line 6) * Shell builtins: Limitations of Builtins. (line 6) * Shell file descriptors: File Descriptors. (line 6) * Shell Functions: Shell Functions. (line 6) * Shell here-documents: Here-Documents. (line 6) * shell invocation: Invoking the Shell. (line 6) * Shell parentheses: Parentheses. (line 6) * Shell pattern matching: Shell Pattern Matching. (line 6) * Shell slashes: Slashes. (line 6) * Shell substitutions: Shell Substitutions. (line 6) * Shell variables: Special Shell Variables. (line 6) * Shellology: Shellology. (line 6) * Signal handling in the shell: Signal Handling. (line 6) * Signals, shells and: Signal Handling. (line 6) * signed integer overflow: Integer Overflow. (line 6) * signed integer overflow <1>: Integer Overflow Basics. (line 6) * signed integer overflow <2>: Signed Overflow Examples. (line 6) * signed integer overflow <3>: Signed Overflow Advice. (line 6) * Single Suffix Inference Rule: Single Suffix Rules. (line 6) * Site defaults: Site Defaults. (line 6) * Site details: Site Details. (line 6) * Special shell variables: Special Shell Variables. (line 6) * srcdir: Configuration Actions. (line 71) * srcdir <1>: Preset Output Variables. (line 197) * standard input: File Descriptor Macros. (line 6) * Standard symbols: Standard Symbols. (line 6) * Structure, checking: Structures. (line 6) * Subdirectory configure: Subdirectories. (line 6) * Substitutions in makefiles: Makefile Substitutions. (line 6) * Symbolic links: Limitations of Usual Tools. (line 538) * sysconfdir: Installation Directory Variables. (line 96) * System type: Specifying Target Triplets. (line 6) * System type <1>: Canonicalizing. (line 6) * Systemology: Systemology. (line 6) * Target triplet: Specifying Target Triplets. (line 6) * termios Posix headers: System Services. (line 76) * test group: testsuite Scripts. (line 12) * testsuite: testsuite Scripts. 
(line 6) * testsuite <1>: testsuite Invocation. (line 6) * timestamp resolution: Limitations of Usual Tools. (line 221) * timestamp resolution <1>: Limitations of Usual Tools. (line 1002) * timestamp resolution <2>: Timestamps and Make. (line 6) * tmp: Configuration Actions. (line 89) * top_builddir: Preset Output Variables. (line 182) * top_build_prefix: Preset Output Variables. (line 186) * top_srcdir: Preset Output Variables. (line 204) * Transforming program names: Transforming Names. (line 6) * Types: Types. (line 6) * unbalanced parentheses, managing: Balancing Parentheses. (line 6) * undefined macro: New Macros. (line 6) * Unix version 7: Systemology. (line 44) * Unordered set manipulation: Set manipulation Macros. (line 6) * Upgrading autoconf: Autoconf 1. (line 6) * Upgrading autoconf <1>: Autoconf 2.13. (line 6) * V7: Systemology. (line 44) * variable name indirection: Polymorphic Variables. (line 6) * variable names, composing: Polymorphic Variables. (line 128) * Variable, Precious: Setting Output Variables. (line 66) * variables and VPATH: Variables listed in VPATH. (line 6) * Version: Versioning. (line 11) * version, Autoconf: Versioning. (line 6) * volatile objects: Volatile Objects. (line 6) * VPATH: VPATH and Make. (line 6) * VPATH and automatic rule rewriting: Automatic Rule Rewriting. (line 6) * VPATH and double-colon rules: VPATH and Double-colon. (line 6) * VPATH and prerequisite directories: Tru64 Directory Magic. (line 6) * VPATH and variables: Variables listed in VPATH. (line 6) * VPATH, explicit rules, and $<: $< in Explicit Rules. (line 6) * VPATH, resolving target pathnames: Make Target Lookup. (line 6) * whitespace in command-line macros: Command-line Macros and whitespace. (line 6) * whitespace in Makefile macros: Trailing whitespace in Make Macros. (line 6) * wraparound arithmetic: Integer Overflow. (line 6) * wraparound arithmetic <1>: Integer Overflow Basics. (line 6) * wraparound arithmetic <2>: Signed Overflow Examples. 
(line 6) * wraparound arithmetic <3>: Signed Overflow Advice. (line 6) * X Window System: System Services. (line 10) * Zsh: Shellology. (line 88)  Tag Table: Node: Top1986 Node: Introduction21557 Node: The GNU Build System28121 Node: Automake29102 Node: Gnulib31140 Node: Libtool32558 Node: Pointers33985 Ref: Pointers-Footnote-135306 Node: Making configure Scripts35470 Node: Writing Autoconf Input38915 Node: Shell Script Compiler40507 Node: Autoconf Language42924 Node: Autoconf Input Layout50517 Node: autoscan Invocation51961 Node: ifnames Invocation54657 Node: autoconf Invocation55925 Node: autoreconf Invocation61378 Node: Setup67241 Node: Initializing configure68573 Ref: AC_INIT69110 Node: Versioning75067 Node: Notices76960 Node: Input78190 Ref: AC_CONFIG_SRCDIR78467 Node: Output84606 Ref: AC_OUTPUT85057 Ref: AC_PROG_MAKE_SET86764 Node: Configuration Actions87233 Node: Configuration Files92708 Ref: AC_CONFIG_FILES92969 Node: Makefile Substitutions94314 Node: Preset Output Variables96122 Node: Installation Directory Variables106156 Node: Changed Directory Variables114725 Node: Build Directories117375 Node: Automatic Remaking119531 Node: Configuration Headers121775 Node: Header Templates125563 Node: autoheader Invocation128421 Node: Autoheader Macros132869 Node: Configuration Commands134922 Ref: AC_CONFIG_COMMANDS135446 Node: Configuration Links136859 Ref: AC_CONFIG_LINKS137314 Node: Subdirectories138407 Node: Default Prefix140829 Ref: AC_PREFIX_PROGRAM141764 Node: Existing Tests142327 Node: Common Behavior144134 Node: Standard Symbols144773 Node: Default Includes145370 Node: Alternative Programs149699 Node: Particular Programs150387 Ref: AC_PROG_LEX156465 Ref: AC_PROG_LN_S161165 Node: Generic Programs162896 Ref: AC_CHECK_PROG163890 Ref: AC_CHECK_PROGS164628 Ref: AC_PATH_PROG168738 Ref: AC_PATH_PROGS169124 Node: Files172295 Node: Libraries173581 Ref: AC_CHECK_LIB173822 Ref: AC_SEARCH_LIBS176141 Node: Library Functions177363 Node: Function Portability177988 Node: 
Particular Functions187747 Ref: AC_FUNC_ALLOCA188079 Ref: AC_FUNC_CLOSEDIR_VOID190110 Ref: AC_FUNC_FORK192197 Ref: AC_FUNC_GETLOADAVG194583 Ref: AC_FUNC_GETMNTENT196226 Ref: AC_FUNC_MMAP201198 Ref: AC_FUNC_STRCOLL205052 Ref: AC_FUNC_STRFTIME206355 Ref: AC_FUNC_UTIME_NULL208030 Ref: AC_FUNC_VPRINTF208396 Node: Generic Functions209702 Ref: AC_CHECK_FUNC210236 Ref: AC_CHECK_FUNCS210877 Node: Header Files215661 Node: Header Portability216296 Node: Particular Headers220043 Ref: AC_HEADER_DIRENT221132 Ref: AC_HEADER_MAJOR222734 Ref: AC_HEADER_STAT224720 Ref: AC_HEADER_STDC226302 Node: Generic Headers228556 Ref: AC_CHECK_HEADER228960 Ref: AC_CHECK_HEADERS230373 Node: Declarations233153 Node: Particular Declarations233753 Node: Generic Declarations233977 Ref: AC_CHECK_DECLS235378 Node: Structures237978 Node: Particular Structures238597 Ref: AC_STRUCT_ST_BLOCKS239718 Ref: AC_STRUCT_TIMEZONE240470 Node: Generic Structures240827 Ref: AC_CHECK_MEMBERS241830 Node: Types242679 Node: Particular Types243201 Ref: AC_TYPE_GETGROUPS243664 Ref: AC_TYPE_MODE_T246878 Ref: AC_TYPE_OFF_T247069 Ref: AC_TYPE_PID_T247257 Ref: AC_TYPE_SIZE_T247445 Ref: AC_TYPE_UID_T247830 Node: Generic Types249579 Node: Compilers and Preprocessors251787 Node: Specific Compiler Characteristics253099 Node: Generic Compiler Characteristics254233 Ref: AC_CHECK_SIZEOF254473 Node: C Compiler260446 Ref: AC_PROG_CC262838 Ref: AC_PROG_CC_C_O266415 Ref: AC_C_BIGENDIAN268416 Ref: AC_C_CONST270274 Ref: AC_C_INLINE273907 Ref: AC_C_CHAR_UNSIGNED274150 Ref: AC_PROG_GCC_TRADITIONAL277121 Node: C++ Compiler277546 Node: Objective C Compiler282027 Node: Objective C++ Compiler283644 Node: Erlang Compiler and Interpreter285352 Node: Fortran Compiler287497 Node: Go Compiler313796 Node: System Services314889 Ref: AC_PATH_X315140 Ref: AC_PATH_XTRA316186 Ref: AC_SYS_INTERPRETER316790 Ref: AC_SYS_LONG_FILE_NAMES318424 Node: C and Posix Variants318817 Ref: AC_USE_SYSTEM_EXTENSIONS319200 Node: Erlang Libraries322668 Node: Writing 
Tests327779 Node: Language Choice329809 Ref: AC_LANG330314 Ref: AC_LANG_PUSH332337 Ref: Language Choice-Footnote-1334260 Node: Writing Test Programs334416 Node: Guidelines334994 Node: Test Functions337344 Node: Generating Sources338766 Node: Running the Preprocessor344976 Ref: AC_PREPROC_IFELSE345712 Ref: AC_EGREP_HEADER347702 Ref: AC_EGREP_CPP348087 Node: Running the Compiler349249 Node: Running the Linker351054 Ref: AC_LINK_IFELSE352228 Node: Runtime353131 Ref: AC_RUN_IFELSE353912 Node: Systemology358879 Node: Multiple Cases361155 Node: Results362874 Node: Defining Symbols363709 Node: Setting Output Variables368736 Node: Special Chars in Variables374921 Node: Caching Results376217 Node: Cache Variable Names380023 Node: Cache Files381717 Node: Cache Checkpointing384116 Node: Printing Messages385514 Ref: AC_MSG_RESULT387089 Ref: AC_MSG_NOTICE387619 Ref: AC_MSG_ERROR387995 Ref: AC_MSG_WARN388878 Node: Programming in M4389317 Node: M4 Quotation390130 Node: Active Characters391099 Ref: Active Characters-Footnote-1392530 Ref: Active Characters-Footnote-2392660 Node: One Macro Call392686 Node: Quoting and Parameters394276 Node: Quotation and Nested Macros396662 Node: Changequote is Evil399772 Node: Quadrigraphs402395 Node: Balancing Parentheses405225 Node: Quotation Rule Of Thumb409903 Node: Using autom4te412855 Ref: Using autom4te-Footnote-1413522 Node: autom4te Invocation413571 Node: Customizing autom4te422498 Node: Programming in M4sugar423835 Node: Redefined M4 Macros425028 Node: Diagnostic Macros433553 Ref: m4_fatal434322 Ref: m4_warn434565 Node: Diversion support436187 Ref: m4_divert_text442865 Node: Conditional constructs444178 Node: Looping constructs451122 Ref: m4_foreach_w454800 Node: Evaluation Macros462317 Node: Text processing Macros471137 Node: Number processing Macros480966 Ref: m4_version_compare482964 Node: Set manipulation Macros485319 Node: Forbidden Patterns494589 Node: Debugging via autom4te497744 Node: Programming in M4sh499595 Node: Common Shell 
Constructs500997 Node: Polymorphic Variables509762 Node: Initialization Macros519530 Node: File Descriptor Macros525257 Ref: AS_MESSAGE_LOG_FD526442 Node: Writing Autoconf Macros528008 Node: Macro Definitions528872 Node: Macro Names532640 Node: Dependencies Between Macros536519 Node: Prerequisite Macros537207 Node: Suggested Ordering543952 Node: One-Shot Macros545551 Node: Obsoleting Macros546940 Ref: AU_DEFUN547701 Node: Coding Style550136 Node: Portable Shell558134 Node: Shellology562509 Node: Invoking the Shell566959 Node: Here-Documents568163 Node: File Descriptors571908 Node: Signal Handling578918 Node: File System Conventions584242 Node: Shell Pattern Matching590326 Node: Shell Substitutions590906 Node: Assignments609588 Node: Parentheses611520 Node: Slashes612512 Node: Special Shell Variables613380 Node: Shell Functions627282 Node: Limitations of Builtins630776 Ref: case634921 Ref: echo640278 Ref: export647278 Ref: if653640 Ref: set658025 Ref: trap670498 Ref: unset675235 Node: Limitations of Usual Tools676390 Ref: awk676689 Ref: grep695800 Ref: mkdir701842 Ref: sed708393 Ref: touch720516 Node: Portable Make723902 Node: $< in Ordinary Make Rules725601 Node: Failure in Make Rules726087 Node: Special Chars in Names727159 Node: Backslash-Newline-Empty728153 Node: Backslash-Newline Comments729198 Node: Long Lines in Makefiles730099 Node: Macros and Submakes730484 Node: The Make Macro MAKEFLAGS733259 Node: The Make Macro SHELL734180 Node: Parallel Make736797 Node: Comments in Make Rules740577 Node: Newlines in Make Rules741799 Node: Comments in Make Macros742857 Node: Trailing whitespace in Make Macros744103 Node: Command-line Macros and whitespace744874 Node: obj/ and Make745548 Node: make -k Status746217 Node: VPATH and Make746847 Node: Variables listed in VPATH748241 Node: VPATH and Double-colon748804 Node: $< in Explicit Rules749226 Node: Automatic Rule Rewriting749705 Node: Tru64 Directory Magic756635 Node: Make Target Lookup757485 Node: Single Suffix 
Rules762104 Node: Timestamps and Make763474 Node: Portable C and C++765204 Node: Varieties of Unportability766858 Node: Integer Overflow768971 Node: Integer Overflow Basics769994 Node: Signed Overflow Examples771752 Node: Optimization and Wraparound775310 Node: Signed Overflow Advice778344 Node: Signed Integer Division781050 Node: Preprocessor Arithmetic781673 Node: Null Pointers782472 Node: Buffer Overruns783122 Node: Volatile Objects786064 Node: Floating Point Portability791690 Node: Exiting Portably792199 Node: Manual Configuration793693 Node: Specifying Target Triplets794971 Ref: Specifying Names795144 Node: Canonicalizing798754 Node: Using System Type800893 Node: Site Configuration803760 Node: Help Formatting804752 Node: External Software805720 Ref: AC_ARG_WITH807326 Node: Package Options812177 Ref: AC_ARG_ENABLE813684 Node: Pretty Help Strings815330 Ref: AS_HELP_STRING815944 Node: Option Checking818325 Node: Site Details820138 Node: Transforming Names821375 Node: Transformation Options822489 Node: Transformation Examples822986 Node: Transformation Rules824784 Node: Site Defaults826362 Node: Running configure Scripts831953 Node: Basic Installation833042 Node: Compilers and Options837300 Node: Multiple Architectures837966 Node: Installation Names839604 Node: Optional Features842598 Node: Particular Systems844034 Node: System Type845515 Node: Sharing Defaults846883 Node: Defining Variables847562 Node: configure Invocation848480 Node: config.status Invocation850310 Ref: CONFIG_SHELL854210 Node: Obsolete Constructs855414 Node: Obsolete config.status Use856389 Node: acconfig Header858243 Node: autoupdate Invocation860378 Node: Obsolete Macros862350 Ref: AC_FUNC_SETVBUF_REVERSED870437 Ref: AC_HEADER_TIME872589 Ref: AC_TYPE_SIGNAL888440 Node: Autoconf 1892166 Node: Changed File Names893250 Node: Changed Makefiles894048 Node: Changed Macros895196 Node: Changed Results896493 Node: Changed Macro Writing898652 Node: Autoconf 2.13899960 Node: Changed Quotation901182 Node: 
New Macros903114 Node: Hosts and Cross-Compilation904945 Node: AC_LIBOBJ vs LIBOBJS909277 Node: AC_ACT_IFELSE vs AC_TRY_ACT910968 Ref: AC_FOO_IFELSE vs AC_TRY_FOO911165 Node: Using Autotest913047 Node: Using an Autotest Test Suite915445 Node: testsuite Scripts915736 Node: Autotest Logs920336 Node: Writing Testsuites922721 Node: testsuite Invocation942922 Node: Making testsuite Scripts948535 Node: FAQ954893 Node: Distributing955769 Node: Why GNU M4956852 Node: Bootstrapping957721 Node: Why Not Imake958343 Node: Defining Directories963161 Node: Autom4te Cache965363 Node: Present But Cannot Be Compiled967309 Node: Expanded Before Required970203 Node: Debugging975247 Node: History980413 Node: Genesis981288 Node: Exodus982495 Node: Leviticus985612 Node: Numbers987150 Node: Deuteronomy989107 Node: GNU Free Documentation License991845 Node: Indices1017196 Node: Environment Variable Index1017915 Node: Output Variable Index1029255 Node: Preprocessor Symbol Index1046920 Node: Cache Variable Index1068116 Node: Autoconf Macro Index1079295 Node: M4 Macro Index1113979 Node: Autotest Macro Index1134729 Node: Program & Function Index1137278 Node: Concept Index1160325  End Tag Table  Local Variables: coding: utf-8 End: autoconf-2.71/doc/standards.info0000644000000000000000000074063014004623450013565 00000000000000This is standards.info, produced by makeinfo version 6.7 from standards.texi. The GNU coding standards, last updated June 12, 2020. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. 
A copy of the license is included in the section entitled "GNU Free Documentation License". INFO-DIR-SECTION GNU organization START-INFO-DIR-ENTRY * Standards: (standards). GNU coding standards. END-INFO-DIR-ENTRY  File: standards.info, Node: Top, Next: Preface, Up: (dir) GNU Coding Standards ******************** The GNU coding standards, last updated June 12, 2020. Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled "GNU Free Documentation License". * Menu: * Preface:: About the GNU Coding Standards. * Legal Issues:: Keeping free software free. * Design Advice:: General program design. * Program Behavior:: Program behavior for all programs * Writing C:: Making the best use of C. * Documentation:: Documenting programs. * Managing Releases:: The release process. * References:: Mentioning non-free software or documentation. * GNU Free Documentation License:: Copying and sharing this manual. * Index::  File: standards.info, Node: Preface, Next: Legal Issues, Prev: Top, Up: Top 1 About the GNU Coding Standards ******************************** The GNU Coding Standards were written by Richard Stallman and other GNU Project volunteers. Their purpose is to make the GNU system clean, consistent, and easy to install. This document can also be read as a guide to writing portable, robust and reliable programs. It focuses on programs written in C, but many of the rules and principles are useful even if you write in another programming language. 
The rules often state reasons for writing in a certain way. If you did not obtain this file directly from the GNU project and recently, please check for a newer version. You can get the GNU Coding Standards from the GNU web server in many different formats, including the Texinfo source, PDF, HTML, DVI, plain text, and more, at: . If you are maintaining an official GNU package, in addition to this document, please read and follow the GNU maintainer information (*note Contents: (maintain)Top.). If you want to receive diffs for every change to these GNU documents, join the mailing list 'gnustandards-commit@gnu.org', via the web interface at . Archives are also available there. Please send corrections or suggestions for this document to . If you make a suggestion, please include a suggested new wording for it, to help us consider the suggestion efficiently. We prefer a context diff to the Texinfo source, but if that's difficult for you, you can make a context diff for some other version of this document, or propose it in any way that makes it clear. The source repository for this document can be found at . These standards cover the minimum of what is important when writing a GNU package. Likely, the need for additional standards will come up. Sometimes, you might suggest that such standards be added to this document. If you think your standards would be generally useful, please do suggest them. You should also set standards for your package on many questions not addressed or not firmly specified here. The most important point is to be self-consistent--try to stick to the conventions you pick, and try to document them as much as possible. That way, your program will be more maintainable by others. The GNU Hello program serves as an example of how to follow the GNU coding standards for a trivial program. . This release of the GNU Coding Standards was last updated June 12, 2020.  
File: standards.info, Node: Legal Issues, Next: Design Advice, Prev: Preface, Up: Top 2 Keeping Free Software Free **************************** This chapter discusses how you can make sure that GNU software avoids legal difficulties, and other related issues. * Menu: * Reading Non-Free Code:: Referring to proprietary programs. * Contributions:: Accepting contributions. * Trademarks:: How we deal with trademark issues.  File: standards.info, Node: Reading Non-Free Code, Next: Contributions, Up: Legal Issues 2.1 Referring to Proprietary Programs ===================================== Don't in any circumstances refer to Unix source code for or during your work on GNU! (Or to any other proprietary programs.) If you have a vague recollection of the internals of a Unix program, this does not absolutely mean you can't write an imitation of it, but do try to organize the imitation internally along different lines, because this is likely to make the details of the Unix version irrelevant and dissimilar to your results. For example, Unix utilities were generally optimized to minimize memory use; if you go for speed instead, your program will be very different. You could keep the entire input file in memory and scan it there instead of using stdio. Use a smarter algorithm discovered more recently than the Unix program. Eliminate use of temporary files. Do it in one pass instead of two (we did this in the assembler). Or, on the contrary, emphasize simplicity instead of speed. For some applications, the speed of today's computers makes simpler algorithms adequate. Or go for generality. For example, Unix programs often have static tables or fixed-size strings, which make for arbitrary limits; use dynamic allocation instead. Make sure your program handles NULs and other funny characters in the input files. Add a programming language for extensibility and write part of the program in that language. Or turn some parts of the program into independently usable libraries. 
Or use a simple garbage collector instead of tracking precisely when to free memory, or use a new GNU facility such as obstacks.  File: standards.info, Node: Contributions, Next: Trademarks, Prev: Reading Non-Free Code, Up: Legal Issues 2.2 Accepting Contributions =========================== If the program you are working on is copyrighted by the Free Software Foundation, then when someone else sends you a piece of code to add to the program, we need legal papers to use it--just as we asked you to sign papers initially. _Each_ person who makes a nontrivial contribution to a program must sign some sort of legal papers in order for us to have clear title to the program; the main author alone is not enough. So, before adding in any contributions from other people, please tell us, so we can arrange to get the papers. Then wait until we tell you that we have received the signed papers, before you actually use the contribution. This applies both before you release the program and afterward. If you receive diffs to fix a bug, and they make significant changes, we need legal papers for that change. This also applies to comments and documentation files. For copyright law, comments and code are just text. Copyright applies to all kinds of text, so we need legal papers for all kinds. We know it is frustrating to ask for legal papers; it's frustrating for us as well. But if you don't wait, you are going out on a limb--for example, what if the contributor's employer won't sign a disclaimer? You might have to take that code out again! You don't need papers for changes of a few lines here or there, since they are not significant for copyright purposes. Also, you don't need papers if all you get from the suggestion is some ideas, not actual code which you use. For example, if someone sent you one implementation, but you write a different implementation of the same idea, you don't need to get papers. The very worst thing is if you forget to tell us about the other contributor. 
We could be very embarrassed in court some day as a result. We have more detailed advice for maintainers of GNU packages. If you have reached the stage of maintaining a GNU program (whether released or not), please take a look: *note (maintain)Legal Matters::.  File: standards.info, Node: Trademarks, Prev: Contributions, Up: Legal Issues 2.3 Trademarks ============== Please do not include any trademark acknowledgments in GNU software packages or documentation. Trademark acknowledgments are the statements that such-and-such is a trademark of so-and-so. The GNU Project has no objection to the basic idea of trademarks, but these acknowledgments feel like kowtowing, and there is no legal requirement for them, so we don't use them. What is legally required, as regards other people's trademarks, is to avoid using them in ways which a reader might reasonably understand as naming or labeling our own programs or activities. For example, since "Objective C" is (or at least was) a trademark, we made sure to say that we provide a "compiler for the Objective C language" rather than an "Objective C compiler". The latter would have been meant as a shorter way of saying the former, but it does not explicitly state the relationship, so it could be misinterpreted as using "Objective C" as a label for the compiler rather than for the language. Please don't use "win" as an abbreviation for Microsoft Windows in GNU software or documentation. In hacker terminology, calling something a "win" is a form of praise. You're free to praise Microsoft Windows on your own if you want, but please don't do so in GNU packages. Please write "Windows" in full, or abbreviate it to "w." *Note System Portability::.  File: standards.info, Node: Design Advice, Next: Program Behavior, Prev: Legal Issues, Up: Top 3 General Program Design ************************ This chapter discusses some of the issues you should take into account when designing your program. 
The standard extensibility interpreter for GNU software is Guile (<https://www.gnu.org/software/guile/>), which implements the language Scheme (an especially clean and simple dialect of Lisp).
File: standards.info, Node: Compatibility, Next: Using Extensions, Prev: Source Language, Up: Design Advice 3.2 Compatibility with Other Implementations ============================================ With occasional exceptions, utility programs and libraries for GNU should be upward compatible with those in Berkeley Unix, and upward compatible with Standard C if Standard C specifies their behavior, and upward compatible with POSIX if POSIX specifies their behavior. When these standards conflict, it is useful to offer compatibility modes for each of them. Standard C and POSIX prohibit many kinds of extensions. Feel free to make the extensions anyway, and include a '--ansi', '--posix', or '--compatible' option to turn them off. However, if the extension has a significant chance of breaking any real programs or scripts, then it is not really upward compatible. So you should try to redesign its interface to make it upward compatible. Many GNU programs suppress extensions that conflict with POSIX if the environment variable 'POSIXLY_CORRECT' is defined (even if it is defined with a null value). Please make your program recognize this variable if appropriate. When a feature is used only by users (not by programs or command files), and it is done poorly in Unix, feel free to replace it completely with something totally different and better. (For example, 'vi' is replaced with Emacs.) But it is nice to offer a compatible feature as well. (There is a free 'vi' clone, so we offer it.) Additional useful features are welcome regardless of whether there is any precedent for them.  File: standards.info, Node: Using Extensions, Next: Standard C, Prev: Compatibility, Up: Design Advice 3.3 Using Non-standard Features =============================== Many GNU facilities that already exist support a number of convenient extensions over the comparable Unix facilities. Whether to use these extensions in implementing your program is a difficult question. 
On the one hand, using the extensions can make a cleaner program. On the other hand, people will not be able to build the program unless the other GNU tools are available. This might cause the program to work on fewer kinds of machines. With some extensions, it might be easy to provide both alternatives. For example, you can define functions with a "keyword" 'INLINE' and define that as a macro to expand into either 'inline' or nothing, depending on the compiler. In general, perhaps it is best not to use the extensions if you can straightforwardly do without them, but to use the extensions if they are a big improvement. An exception to this rule are the large, established programs (such as Emacs) which run on a great variety of systems. Using GNU extensions in such programs would make many users unhappy, so we don't do that. Another exception is for programs that are used as part of compilation: anything that must be compiled with other compilers in order to bootstrap the GNU compilation facilities. If these require the GNU compiler, then no one can compile them without having them installed already. That would be extremely troublesome in certain cases.  File: standards.info, Node: Standard C, Next: Conditional Compilation, Prev: Using Extensions, Up: Design Advice 3.4 Standard C and Pre-Standard C ================================= 1989 Standard C is widespread enough now that it is ok to use its features in programs. There is one exception: do not ever use the "trigraph" feature of Standard C. The 1999 and 2011 editions of Standard C are not fully supported on all platforms. If you aim to support compilation by compilers other than GCC, you should not require these C features in your programs. It is ok to use these features conditionally when the compiler supports them. If your program is only meant to compile with GCC, then you can use these features if GCC supports them, when they give substantial benefit. 
However, it is easy to support pre-standard compilers in most programs, so if you know how to do that, feel free. To support pre-standard C, instead of writing function definitions in standard prototype form, int foo (int x, int y) ... write the definition in pre-standard style like this, int foo (x, y) int x, y; ... and use a separate declaration to specify the argument prototype: int foo (int, int); You need such a declaration anyway, in a header file, to get the benefit of prototypes in all the files where the function is called. And once you have the declaration, you normally lose nothing by writing the function definition in the pre-standard style. This technique does not work for integer types narrower than 'int'. If you think of an argument as being of a type narrower than 'int', declare it as 'int' instead. There are a few special cases where this technique is hard to use. For example, if a function argument needs to hold the system type 'dev_t', you run into trouble, because 'dev_t' is shorter than 'int' on some machines; but you cannot use 'int' instead, because 'dev_t' is wider than 'int' on some machines. There is no type you can safely use on all machines in a non-standard definition. The only way to support non-standard C and pass such an argument is to check the width of 'dev_t' using Autoconf and choose the argument type accordingly. This may not be worth the trouble. In order to support pre-standard compilers that do not recognize prototypes, you may want to use a preprocessor macro like this: /* Declare the prototype for a general external function. */ #if defined (__STDC__) || defined (WINDOWSNT) #define P_(proto) proto #else #define P_(proto) () #endif  File: standards.info, Node: Conditional Compilation, Prev: Standard C, Up: Design Advice 3.5 Conditional Compilation =========================== When supporting configuration options already known when building your program we prefer using 'if (... 
)' over conditional compilation, as in the former case the compiler is able to perform more extensive checking of all possible code paths. For example, please write if (HAS_FOO) ... else ... instead of: #ifdef HAS_FOO ... #else ... #endif A modern compiler such as GCC will generate exactly the same code in both cases, and we have been using similar techniques with good success in several projects. Of course, the former method assumes that 'HAS_FOO' is defined as either 0 or 1. While this is not a silver bullet solving all portability problems, and is not always appropriate, following this policy would have saved GCC developers many hours, or even days, per year. In the case of function-like macros like 'REVERSIBLE_CC_MODE' in GCC which cannot be simply used in 'if (...)' statements, there is an easy workaround. Simply introduce another macro 'HAS_REVERSIBLE_CC_MODE' as in the following example: #ifdef REVERSIBLE_CC_MODE #define HAS_REVERSIBLE_CC_MODE 1 #else #define HAS_REVERSIBLE_CC_MODE 0 #endif  File: standards.info, Node: Program Behavior, Next: Writing C, Prev: Design Advice, Up: Top 4 Program Behavior for All Programs *********************************** This chapter describes conventions for writing robust software. It also describes general standards for error messages, the command line interface, and how libraries should behave. * Menu: * Non-GNU Standards:: We consider standards such as POSIX; we don't "obey" them. * Semantics:: Writing robust programs. * Libraries:: Library behavior. * Errors:: Formatting error messages. * User Interfaces:: Standards about interfaces generally. * Finding Program Files:: How to find the program's executable and other files that go with it. * Graphical Interfaces:: Standards for graphical interfaces. * Command-Line Interfaces:: Standards for command line interfaces. * Dynamic Plug-In Interfaces:: Standards for dynamic plug-in interfaces. * Option Table:: Table of long options. * OID Allocations:: Table of OID slots for GNU. 
* Memory Usage:: When and how to care about memory needs. * File Usage:: Which files to use, and where.  File: standards.info, Node: Non-GNU Standards, Next: Semantics, Up: Program Behavior 4.1 Non-GNU Standards ===================== The GNU Project regards standards published by other organizations as suggestions, not orders. We consider those standards, but we do not "obey" them. In developing a GNU program, you should implement an outside standard's specifications when that makes the GNU system better overall in an objective sense. When it doesn't, you shouldn't. In most cases, following published standards is convenient for users--it means that their programs or scripts will work more portably. For instance, GCC implements nearly all the features of Standard C as specified by that standard. C program developers would be unhappy if it did not. And GNU utilities mostly follow specifications of POSIX.2; shell script writers and users would be unhappy if our programs were incompatible. But we do not follow either of these specifications rigidly, and there are specific points on which we decided not to follow them, so as to make the GNU system better for users. For instance, Standard C says that nearly all extensions to C are prohibited. How silly! GCC implements many extensions, some of which were later adopted as part of the standard. If you want these constructs to give an error message as "required" by the standard, you must specify '--pedantic', which was implemented only so that we can say "GCC is a 100% implementation of the standard", not because there is any reason to actually use it. POSIX.2 specifies that 'df' and 'du' must output sizes by default in units of 512 bytes. What users want is units of 1k, so that is what we do by default. If you want the ridiculous behavior "required" by POSIX, you must set the environment variable 'POSIXLY_CORRECT' (which was originally going to be named 'POSIX_ME_HARDER'). 
GNU utilities also depart from the letter of the POSIX.2 specification when they support long-named command-line options, and intermixing options with ordinary arguments. This minor incompatibility with POSIX is never a problem in practice, and it is very useful. In particular, don't reject a new feature, or remove an old one, merely because a standard says it is "forbidden" or "deprecated".  File: standards.info, Node: Semantics, Next: Libraries, Prev: Non-GNU Standards, Up: Program Behavior 4.2 Writing Robust Programs =========================== Avoid arbitrary limits on the length or number of _any_ data structure, including file names, lines, files, and symbols, by allocating all data structures dynamically. In most Unix utilities, "long lines are silently truncated". This is not acceptable in a GNU utility. Utilities reading files should not drop NUL characters, or any other nonprinting characters. Programs should work properly with multibyte character encodings, such as UTF-8. You can use libiconv to deal with a range of encodings. Check every system call for an error return, unless you know you wish to ignore errors. Include the system error text (from 'strerror', or equivalent) in _every_ error message resulting from a failing system call, as well as the name of the file if any and the name of the utility. Just "cannot open foo.c" or "stat failed" is not sufficient. Check every call to 'malloc' or 'realloc' to see if it returned 'NULL'. Check 'realloc' even if you are making the block smaller; in a system that rounds block sizes to a power of 2, 'realloc' may get a different block if you ask for less space. You must expect 'free' to alter the contents of the block that was freed. Anything you want to fetch from the block, you must fetch before calling 'free'. If 'malloc' fails in a noninteractive program, make that a fatal error. 
In an interactive program (one that reads commands from the user), it is better to abort the command and return to the command reader loop. This allows the user to kill other processes to free up virtual memory, and then try the command again. Use 'getopt_long' to decode arguments, unless the argument syntax makes this unreasonable. When static storage is to be written in during program execution, use explicit C code to initialize it. This way, restarting the program (without reloading it), or part of it, will reinitialize those variables. Reserve C initialized declarations for data that will not be changed. Try to avoid low-level interfaces to obscure Unix data structures (such as file directories, utmp, or the layout of kernel memory), since these are less likely to work compatibly. If you need to find all the files in a directory, use 'readdir' or some other high-level interface. These are supported compatibly by GNU. The preferred signal handling facilities are the BSD variant of 'signal', and the POSIX 'sigaction' function; the alternative USG 'signal' interface is an inferior design. Nowadays, using the POSIX signal functions may be the easiest way to make a program portable. If you use 'signal', then on GNU/Linux systems running GNU libc version 1, you should include 'bsd/signal.h' instead of 'signal.h', so as to get BSD behavior. It is up to you whether to support systems where 'signal' has only the USG behavior, or give up on them. In error checks that detect "impossible" conditions, just abort. There is usually no point in printing any message. These checks indicate the existence of bugs. Whoever wants to fix the bugs will have to read the source code and run a debugger. So explain the problem with comments in the source. The relevant data will be in variables, which are easy to examine with the debugger, so there is no point moving them elsewhere. Do not use a count of errors as the exit status for a program. 
_That does not work_, because exit status values are limited to 8 bits (0 through 255). A single run of the program might have 256 errors; if you try to return 256 as the exit status, the parent process will see 0 as the status, and it will appear that the program succeeded. If you make temporary files, check the 'TMPDIR' environment variable; if that variable is defined, use the specified directory instead of '/tmp'. In addition, be aware that there is a possible security problem when creating temporary files in world-writable directories. In C, you can avoid this problem by creating temporary files in this manner: fd = open (filename, O_WRONLY | O_CREAT | O_EXCL, 0600); or by using the 'mkstemps' function from Gnulib (*note (gnulib)mkstemps::). In bash, use 'set -C' (long name 'noclobber') to avoid this problem. In addition, the 'mktemp' utility is a more general solution for creating temporary files from shell scripts (*note (coreutils)mktemp invocation::).  File: standards.info, Node: Libraries, Next: Errors, Prev: Semantics, Up: Program Behavior 4.3 Library Behavior ==================== Try to make library functions reentrant. If they need to do dynamic storage allocation, at least try to avoid any nonreentrancy aside from that of 'malloc' itself. Here are certain name conventions for libraries, to avoid name conflicts. Choose a name prefix for the library, more than two characters long. All external function and variable names should start with this prefix. In addition, there should only be one of these in any given library member. This usually means putting each one in a separate source file. An exception can be made when two external symbols are always used together, so that no reasonable program could use one without the other; then they can both go in the same file. External symbols that are not documented entry points for the user should have names beginning with '_'. 
The '_' should be followed by the chosen name prefix for the library, to prevent collisions with other libraries. These can go in the same files with user entry points if you like. Static functions and variables can be used as you like and need not fit any naming convention.  File: standards.info, Node: Errors, Next: User Interfaces, Prev: Libraries, Up: Program Behavior 4.4 Formatting Error Messages ============================= Error messages from compilers should look like this: SOURCEFILE:LINENO: MESSAGE If you want to mention the column number, use one of these formats: SOURCEFILE:LINENO:COLUMN: MESSAGE SOURCEFILE:LINENO.COLUMN: MESSAGE Line numbers should start from 1 at the beginning of the file, and column numbers should start from 1 at the beginning of the line. (Both of these conventions are chosen for compatibility.) Calculate column numbers assuming that space and all ASCII printing characters have equal width, and assuming tab stops every 8 columns. For non-ASCII characters, Unicode character widths should be used when in a UTF-8 locale; GNU libc and GNU gnulib provide suitable 'wcwidth' functions. The error message can also give both the starting and ending positions of the erroneous text. There are several formats so that you can avoid redundant information such as a duplicate line number. Here are the possible formats: SOURCEFILE:LINE1.COLUMN1-LINE2.COLUMN2: MESSAGE SOURCEFILE:LINE1.COLUMN1-COLUMN2: MESSAGE SOURCEFILE:LINE1-LINE2: MESSAGE When an error is spread over several files, you can use this format: FILE1:LINE1.COLUMN1-FILE2:LINE2.COLUMN2: MESSAGE Error messages from other noninteractive programs should look like this: PROGRAM:SOURCEFILE:LINENO: MESSAGE when there is an appropriate source file, or like this: PROGRAM: MESSAGE when there is no relevant source file. 
If you want to mention the column number, use this format: PROGRAM:SOURCEFILE:LINENO:COLUMN: MESSAGE In an interactive program (one that is reading commands from a terminal), it is better not to include the program name in an error message. The place to indicate which program is running is in the prompt or with the screen layout. (When the same program runs with input from a source other than a terminal, it is not interactive and would do best to print error messages using the noninteractive style.) The string MESSAGE should not begin with a capital letter when it follows a program name and/or file name, because that isn't the beginning of a sentence. (The sentence conceptually starts at the beginning of the line.) Also, it should not end with a period. Error messages from interactive programs, and other messages such as usage messages, should start with a capital letter. But they should not end with a period.  File: standards.info, Node: User Interfaces, Next: Finding Program Files, Prev: Errors, Up: Program Behavior 4.5 Standards for Interfaces Generally ====================================== Please don't make the behavior of a utility depend on the name used to invoke it. It is useful sometimes to make a link to a utility with a different name, and that should not change what it does. Thus, if you make 'foo' a link to 'ls', the program should behave the same regardless of which of those names is used to invoke it. Instead, use a run time option or a compilation switch or both to select among the alternate behaviors. You can also build two versions of the program, with different default behaviors, and install them under two different names. Likewise, please don't make the behavior of a command-line program depend on the type of output device it gets as standard output or standard input. Device independence is an important principle of the system's design; do not compromise it merely to save someone from typing an option now and then. 
(Variation in error message syntax when using a terminal is ok, because that is a side issue that people do not depend on.) If you think one behavior is most useful when the output is to a terminal, and another is most useful when the output is a file or a pipe, then it is usually best to make the default behavior the one that is useful with output to a terminal, and have an option for the other behavior. You can also build two different versions of the program with different names. There is an exception for programs whose output in certain cases is binary data. Sending such output to a terminal is useless and can cause trouble. If such a program normally sends its output to stdout, it should detect, in these cases, when the output is a terminal and give an error message instead. The '-f' option should override this exception, thus permitting the output to go to the terminal. Compatibility requires certain programs to depend on the type of output device. It would be disastrous if 'ls' or 'sh' did not do so in the way all users expect. In some of these cases, we supplement the program with a preferred alternate version that does not depend on the output device type. For example, we provide a 'dir' program much like 'ls' except that its default output format is always multi-column format.  File: standards.info, Node: Finding Program Files, Next: Graphical Interfaces, Prev: User Interfaces, Up: Program Behavior 4.6 Finding the Program's Executable and Associated Files ========================================================= A program may need to find the executable file it was started with, so as to relaunch the same program. It may need to find associated files, either source files or files constructed by building, that it uses at run time. The way to find them starts with looking at 'argv[0]'. If that string contains a slash, it is by convention the file name of the executable and its directory part is the directory that contained the executable. 
This is the case when the program was not found through 'PATH', which normally means it was built but not installed, and run from the build directory. The program can use the 'argv[0]' file name to relaunch itself, and can look in its directory part for associated files. If that file name is not absolute, then it is relative to the working directory in which the program started. If 'argv[0]' does not contain a slash, it is a command name whose executable was found via 'PATH'. The program should search for that name in the directories in 'PATH', interpreting '.' as the working directory that was current when the program started. If this procedure finds the executable, we call the directory it was found in the "invocation directory". The program should check for the presence in that directory of the associated files it needs. If the program's executable is normally built in a subdirectory of the main build directory, and the main build directory contains associated files (perhaps including subdirectories), the program should look at the parent of the invocation directory, checking for the associated files and subdirectories the main build directory should contain. If the invocation directory doesn't contain what's needed, but the executable file name is a symbolic link, the program should try using the link target's containing directory as the invocation directory. If this procedure doesn't come up with an invocation directory that is valid--normally the case for an installed program that was found via 'PATH'--the program should look for the associated files in the directories where the program's makefile installs them. *Note Directory Variables::. Providing valid information in 'argv[0]' is a convention, not guaranteed. Well-behaved programs that launch other programs, such as shells, follow the convention; your code should follow it too, when launching other programs. But it is always possible to launch the program and give a nonsensical value in 'argv[0]'. 
Therefore, any program that needs to know the location of its executable, or that of other associated files, should offer the user environment variables to specify those locations explicitly.
File: standards.info, Node: Command-Line Interfaces, Next: Dynamic Plug-In Interfaces, Prev: Graphical Interfaces, Up: Program Behavior 4.8 Standards for Command Line Interfaces ========================================= It is a good idea to follow the POSIX guidelines for the command-line options of a program. The easiest way to do this is to use 'getopt' to parse them. Note that the GNU version of 'getopt' will normally permit options anywhere among the arguments unless the special argument '--' is used. This is not what POSIX specifies; it is a GNU extension. Please define long-named options that are equivalent to the single-letter Unix-style options. We hope to make GNU more user friendly this way. This is easy to do with the GNU function 'getopt_long'. One of the advantages of long-named options is that they can be consistent from program to program. For example, users should be able to expect the "verbose" option of any GNU program which has one, to be spelled precisely '--verbose'. To achieve this uniformity, look at the table of common long-option names when you choose the option names for your program (*note Option Table::). It is usually a good idea for file names given as ordinary arguments to be input files only; any output files would be specified using options (preferably '-o' or '--output'). Even if you allow an output file name as an ordinary argument for compatibility, try to provide an option as another way to specify it. This will lead to more consistency among GNU utilities, and fewer idiosyncrasies for users to remember. All programs should support two standard options: '--version' and '--help'. CGI programs should accept these as command-line options, and also if given as the 'PATH_INFO'; for instance, visiting 'http://example.org/p.cgi/--help' in a browser should output the same information as invoking 'p.cgi --help' from the command line. * Menu: * --version:: The standard output for -version. * --help:: The standard output for -help.  
File: standards.info, Node: --version, Next: --help, Up: Command-Line Interfaces 4.8.1 '--version' ----------------- The standard '--version' option should direct the program to print information about its name, version, origin and legal status, all on standard output, and then exit successfully. Other options and arguments should be ignored once this is seen, and the program should not perform its normal function. The first line is meant to be easy for a program to parse; the version number proper starts after the last space. In addition, it contains the canonical name for this program, in this format: GNU Emacs 19.30 The program's name should be a constant string; _don't_ compute it from 'argv[0]'. The idea is to state the standard or canonical name for the program, not its file name. There are other ways to find out the precise file name where a command is found in 'PATH'. If the program is a subsidiary part of a larger package, mention the package name in parentheses, like this: emacsserver (GNU Emacs) 19.30 If the package has a version number which is different from this program's version number, you can mention the package version number just before the close-parenthesis. If you _need_ to mention the version numbers of libraries which are distributed separately from the package which contains this program, you can do so by printing an additional line of version info for each library you want to mention. Use the same format for these lines as for the first line. Please do not mention all of the libraries that the program uses "just for completeness"--that would produce a lot of unhelpful clutter. Please mention library version numbers only if you find in practice that they are very important to you in debugging. The following line, after the version number line or lines, should be a copyright notice. If more than one copyright notice is called for, put each on a separate line. 
Next should follow a line stating the license, preferably using one of the abbreviations below, and a brief statement that the program is free software, and that users are free to copy and change it.
Any abbreviation can be followed by 'vVERSION[+]', meaning that particular version, or later versions with the '+', as shown above. In the case of a GNU license, _always_ indicate the permitted versions in this way. In the case of exceptions for extra permissions with the GPL, we use '/' for a separator; the version number can follow the license abbreviation as usual, as in the examples below. GPL GNU General Public License, . LGPL GNU Lesser General Public License, . GPL/Ada GNU GPL with the exception for Ada. Apache The Apache Software Foundation license, . Artistic The Artistic license used for Perl, . Expat The Expat license, . MPL The Mozilla Public License, . OBSD The original (4-clause) BSD license, incompatible with the GNU GPL, . PHP The license used for PHP, . public domain The non-license that is being in the public domain, . Python The license for Python, . RBSD The revised (3-clause) BSD, compatible with the GNU GPL, . X11 The simple non-copyleft license used for most versions of the X Window System, . Zlib The license for Zlib, . More information about these licenses and many more are on the GNU licensing web pages, .  File: standards.info, Node: --help, Prev: --version, Up: Command-Line Interfaces 4.8.2 '--help' -------------- The standard '--help' option should output brief documentation for how to invoke the program, on standard output, then exit successfully. Other options and arguments should be ignored once this is seen, and the program should not perform its normal function. Near the end of the '--help' option's output, please place lines giving the email address for bug reports, the package's home page (normally 'https://www.gnu.org/software/PKG', and the general page for help using GNU programs. The format should be like this: Report bugs to: MAILING-ADDRESS PKG home page: General help using GNU software: It is ok to mention other appropriate mailing lists and web pages.  
File: standards.info, Node: Dynamic Plug-In Interfaces, Next: Option Table, Prev: Command-Line Interfaces, Up: Program Behavior 4.9 Standards for Dynamic Plug-in Interfaces ============================================ Another aspect of keeping free programs free is encouraging development of free plug-ins, and discouraging development of proprietary plug-ins. Many GNU programs will not have anything like plug-ins at all, but those that do should follow these practices. First, the general plug-in architecture design should closely tie the plug-in to the original code, such that the plug-in and the base program are parts of one extended program. For GCC, for example, plug-ins receive and modify GCC's internal data structures, and so clearly form an extended program with the base GCC. Second, you should require plug-in developers to affirm that their plug-ins are released under an appropriate license. This should be enforced with a simple programmatic check. For GCC, again for example, a plug-in must define the global symbol 'plugin_is_GPL_compatible', thus asserting that the plug-in is released under a GPL-compatible license (*note Plugins: (gccint)Plugins.). By adding this check to your program you are not creating a new legal requirement. The GPL itself requires plug-ins to be free software, licensed compatibly. As long as you have followed the first rule above to keep plug-ins closely tied to your original program, the GPL and AGPL already require those plug-ins to be released under a compatible license. The symbol definition in the plug-in--or whatever equivalent works best in your program--makes it harder for anyone who might distribute proprietary plug-ins to legally defend themselves. If a case about this got to court, we can point to that symbol as evidence that the plug-in developer understood that the license had this requirement.  
File: standards.info, Node: Option Table, Next: OID Allocations, Prev: Dynamic Plug-In Interfaces, Up: Program Behavior 4.10 Table of Long Options ========================== Here is a table of long options used by GNU programs. It is surely incomplete, but we aim to list all the options that a new program might want to be compatible with. If you use names not already in the table, please send a list of them, with their meanings, so we can update the table. 'after-date' '-N' in 'tar'. 'all' '-a' in 'du', 'ls', 'nm', 'stty', 'uname', and 'unexpand'. 'all-text' '-a' in 'diff'. 'almost-all' '-A' in 'ls'. 'append' '-a' in 'etags', 'tee', 'time'; '-r' in 'tar'. 'archive' '-a' in 'cp'. 'archive-name' '-n' in 'shar'. 'arglength' '-l' in 'm4'. 'ascii' '-a' in 'diff'. 'assign' '-v' in 'gawk'. 'assume-new' '-W' in 'make'. 'assume-old' '-o' in 'make'. 'auto-check' '-a' in 'recode'. 'auto-pager' '-a' in 'wdiff'. 'auto-reference' '-A' in 'ptx'. 'avoid-wraps' '-n' in 'wdiff'. 'background' For server programs, run in the background. 'backward-search' '-B' in 'ctags'. 'basename' '-f' in 'shar'. 'batch' Used in GDB. 'baud' Used in GDB. 'before' '-b' in 'tac'. 'binary' '-b' in 'cpio' and 'diff'. 'bits-per-code' '-b' in 'shar'. 'block-size' Used in 'cpio' and 'tar'. 'blocks' '-b' in 'head' and 'tail'. 'break-file' '-b' in 'ptx'. 'brief' Used in various programs to make output shorter. 'bytes' '-c' in 'head', 'split', and 'tail'. 'c++' '-C' in 'etags'. 'catenate' '-A' in 'tar'. 'cd' Used in various programs to specify the directory to use. 'changes' '-c' in 'chgrp' and 'chown'. 'classify' '-F' in 'ls'. 'colons' '-c' in 'recode'. 'command' '-c' in 'su'; '-x' in GDB. 'compare' '-d' in 'tar'. 'compat' Used in 'gawk'. 'compress' '-Z' in 'tar' and 'shar'. 'concatenate' '-A' in 'tar'. 'confirmation' '-w' in 'tar'. 'context' Used in 'diff'. 'copyleft' '-W copyleft' in 'gawk'. 'copyright' '-C' in 'ptx', 'recode', and 'wdiff'; '-W copyright' in 'gawk'. 'core' Used in GDB. 'count' '-q' in 'who'. 
'count-links' '-l' in 'du'. 'create' Used in 'tar' and 'cpio'. 'cut-mark' '-c' in 'shar'. 'cxref' '-x' in 'ctags'. 'date' '-d' in 'touch'. 'debug' '-d' in 'make' and 'm4'; '-t' in Bison. 'define' '-D' in 'm4'. 'defines' '-d' in Bison and 'ctags'. 'delete' '-D' in 'tar'. 'dereference' '-L' in 'chgrp', 'chown', 'cpio', 'du', 'ls', and 'tar'. 'dereference-args' '-D' in 'du'. 'device' Specify an I/O device (special file name). 'diacritics' '-d' in 'recode'. 'dictionary-order' '-d' in 'look'. 'diff' '-d' in 'tar'. 'digits' '-n' in 'csplit'. 'directory' Specify the directory to use, in various programs. In 'ls', it means to show directories themselves rather than their contents. In 'rm' and 'ln', it means to not treat links to directories specially. 'discard-all' '-x' in 'strip'. 'discard-locals' '-X' in 'strip'. 'dry-run' '-n' in 'make'. 'ed' '-e' in 'diff'. 'elide-empty-files' '-z' in 'csplit'. 'end-delete' '-x' in 'wdiff'. 'end-insert' '-z' in 'wdiff'. 'entire-new-file' '-N' in 'diff'. 'environment-overrides' '-e' in 'make'. 'eof' '-e' in 'xargs'. 'epoch' Used in GDB. 'error-limit' Used in 'makeinfo'. 'error-output' '-o' in 'm4'. 'escape' '-b' in 'ls'. 'exclude-from' '-X' in 'tar'. 'exec' Used in GDB. 'exit' '-x' in 'xargs'. 'exit-0' '-e' in 'unshar'. 'expand-tabs' '-t' in 'diff'. 'expression' '-e' in 'sed'. 'extern-only' '-g' in 'nm'. 'extract' '-i' in 'cpio'; '-x' in 'tar'. 'faces' '-f' in 'finger'. 'fast' '-f' in 'su'. 'fatal-warnings' '-E' in 'm4'. 'file' '-f' in 'gawk', 'info', 'make', 'mt', 'sed', and 'tar'. 'field-separator' '-F' in 'gawk'. 'file-prefix' '-b' in Bison. 'file-type' '-F' in 'ls'. 'files-from' '-T' in 'tar'. 'fill-column' Used in 'makeinfo'. 'flag-truncation' '-F' in 'ptx'. 'fixed-output-files' '-y' in Bison. 'follow' '-f' in 'tail'. 'footnote-style' Used in 'makeinfo'. 'force' '-f' in 'cp', 'ln', 'mv', and 'rm'. 'force-prefix' '-F' in 'shar'. 
'foreground' For server programs, run in the foreground; in other words, don't do anything special to run the server in the background. 'format' Used in 'ls', 'time', and 'ptx'. 'freeze-state' '-F' in 'm4'. 'fullname' Used in GDB. 'gap-size' '-g' in 'ptx'. 'get' '-x' in 'tar'. 'graphic' '-i' in 'ul'. 'graphics' '-g' in 'recode'. 'group' '-g' in 'install'. 'gzip' '-z' in 'tar' and 'shar'. 'hashsize' '-H' in 'm4'. 'header' '-h' in 'objdump' and 'recode' 'heading' '-H' in 'who'. 'help' Used to ask for brief usage information. 'here-delimiter' '-d' in 'shar'. 'hide-control-chars' '-q' in 'ls'. 'html' In 'makeinfo', output HTML. 'idle' '-u' in 'who'. 'ifdef' '-D' in 'diff'. 'ignore' '-I' in 'ls'; '-x' in 'recode'. 'ignore-all-space' '-w' in 'diff'. 'ignore-backups' '-B' in 'ls'. 'ignore-blank-lines' '-B' in 'diff'. 'ignore-case' '-f' in 'look' and 'ptx'; '-i' in 'diff' and 'wdiff'. 'ignore-errors' '-i' in 'make'. 'ignore-file' '-i' in 'ptx'. 'ignore-indentation' '-I' in 'etags'. 'ignore-init-file' '-f' in Oleo. 'ignore-interrupts' '-i' in 'tee'. 'ignore-matching-lines' '-I' in 'diff'. 'ignore-space-change' '-b' in 'diff'. 'ignore-zeros' '-i' in 'tar'. 'include' '-i' in 'etags'; '-I' in 'm4'. 'include-dir' '-I' in 'make'. 'incremental' '-G' in 'tar'. 'info' '-i', '-l', and '-m' in Finger. 'init-file' In some programs, specify the name of the file to read as the user's init file. 'initial' '-i' in 'expand'. 'initial-tab' '-T' in 'diff'. 'inode' '-i' in 'ls'. 'interactive' '-i' in 'cp', 'ln', 'mv', 'rm'; '-e' in 'm4'; '-p' in 'xargs'; '-w' in 'tar'. 'intermix-type' '-p' in 'shar'. 'iso-8601' Used in 'date' 'jobs' '-j' in 'make'. 'just-print' '-n' in 'make'. 'keep-going' '-k' in 'make'. 'keep-files' '-k' in 'csplit'. 'kilobytes' '-k' in 'du' and 'ls'. 'language' '-l' in 'etags'. 'less-mode' '-l' in 'wdiff'. 'level-for-gzip' '-g' in 'shar'. 'line-bytes' '-C' in 'split'. 'lines' Used in 'split', 'head', and 'tail'. 'link' '-l' in 'cpio'. 'lint' 'lint-old' Used in 'gawk'. 
'list' '-t' in 'cpio'; '-l' in 'recode'. 'list' '-t' in 'tar'. 'literal' '-N' in 'ls'. 'load-average' '-l' in 'make'. 'login' Used in 'su'. 'machine' Used in 'uname'. 'macro-name' '-M' in 'ptx'. 'mail' '-m' in 'hello' and 'uname'. 'make-directories' '-d' in 'cpio'. 'makefile' '-f' in 'make'. 'mapped' Used in GDB. 'max-args' '-n' in 'xargs'. 'max-chars' '-n' in 'xargs'. 'max-lines' '-l' in 'xargs'. 'max-load' '-l' in 'make'. 'max-procs' '-P' in 'xargs'. 'mesg' '-T' in 'who'. 'message' '-T' in 'who'. 'minimal' '-d' in 'diff'. 'mixed-uuencode' '-M' in 'shar'. 'mode' '-m' in 'install', 'mkdir', and 'mkfifo'. 'modification-time' '-m' in 'tar'. 'multi-volume' '-M' in 'tar'. 'name-prefix' '-a' in Bison. 'nesting-limit' '-L' in 'm4'. 'net-headers' '-a' in 'shar'. 'new-file' '-W' in 'make'. 'no-builtin-rules' '-r' in 'make'. 'no-character-count' '-w' in 'shar'. 'no-check-existing' '-x' in 'shar'. 'no-common' '-3' in 'wdiff'. 'no-create' '-c' in 'touch'. 'no-defines' '-D' in 'etags'. 'no-deleted' '-1' in 'wdiff'. 'no-dereference' '-d' in 'cp'. 'no-inserted' '-2' in 'wdiff'. 'no-keep-going' '-S' in 'make'. 'no-lines' '-l' in Bison. 'no-piping' '-P' in 'shar'. 'no-prof' '-e' in 'gprof'. 'no-regex' '-R' in 'etags'. 'no-sort' '-p' in 'nm'. 'no-splash' Don't print a startup splash screen. 'no-split' Used in 'makeinfo'. 'no-static' '-a' in 'gprof'. 'no-time' '-E' in 'gprof'. 'no-timestamp' '-m' in 'shar'. 'no-validate' Used in 'makeinfo'. 'no-wait' Used in 'emacsclient'. 'no-warn' Used in various programs to inhibit warnings. 'node' '-n' in 'info'. 'nodename' '-n' in 'uname'. 'nonmatching' '-f' in 'cpio'. 'nstuff' '-n' in 'objdump'. 'null' '-0' in 'xargs'. 'number' '-n' in 'cat'. 'number-nonblank' '-b' in 'cat'. 'numeric-sort' '-n' in 'nm'. 'numeric-uid-gid' '-n' in 'cpio' and 'ls'. 'nx' Used in GDB. 'old-archive' '-o' in 'tar'. 'old-file' '-o' in 'make'. 'one-file-system' '-l' in 'tar', 'cp', and 'du'. 'only-file' '-o' in 'ptx'. 'only-prof' '-f' in 'gprof'. 
'only-time' '-F' in 'gprof'. 'options' '-o' in 'getopt', 'fdlist', 'fdmount', 'fdmountd', and 'fdumount'. 'output' In various programs, specify the output file name. 'output-prefix' '-o' in 'shar'. 'override' '-o' in 'rm'. 'overwrite' '-c' in 'unshar'. 'owner' '-o' in 'install'. 'paginate' '-l' in 'diff'. 'paragraph-indent' Used in 'makeinfo'. 'parents' '-p' in 'mkdir' and 'rmdir'. 'pass-all' '-p' in 'ul'. 'pass-through' '-p' in 'cpio'. 'port' '-P' in 'finger'. 'portability' '-c' in 'cpio' and 'tar'. 'posix' Used in 'gawk'. 'prefix-builtins' '-P' in 'm4'. 'prefix' '-f' in 'csplit'. 'preserve' Used in 'tar' and 'cp'. 'preserve-environment' '-p' in 'su'. 'preserve-modification-time' '-m' in 'cpio'. 'preserve-order' '-s' in 'tar'. 'preserve-permissions' '-p' in 'tar'. 'print' '-l' in 'diff'. 'print-chars' '-L' in 'cmp'. 'print-data-base' '-p' in 'make'. 'print-directory' '-w' in 'make'. 'print-file-name' '-o' in 'nm'. 'print-symdefs' '-s' in 'nm'. 'printer' '-p' in 'wdiff'. 'prompt' '-p' in 'ed'. 'proxy' Specify an HTTP proxy. 'query-user' '-X' in 'shar'. 'question' '-q' in 'make'. 'quiet' Used in many programs to inhibit the usual output. Every program accepting '--quiet' should accept '--silent' as a synonym. 'quiet-unshar' '-Q' in 'shar' 'quote-name' '-Q' in 'ls'. 'rcs' '-n' in 'diff'. 're-interval' Used in 'gawk'. 'read-full-blocks' '-B' in 'tar'. 'readnow' Used in GDB. 'recon' '-n' in 'make'. 'record-number' '-R' in 'tar'. 'recursive' Used in 'chgrp', 'chown', 'cp', 'ls', 'diff', and 'rm'. 'reference' '-r' in 'touch'. 'references' '-r' in 'ptx'. 'regex' '-r' in 'tac' and 'etags'. 'release' '-r' in 'uname'. 'reload-state' '-R' in 'm4'. 'relocation' '-r' in 'objdump'. 'rename' '-r' in 'cpio'. 'replace' '-i' in 'xargs'. 'report-identical-files' '-s' in 'diff'. 'reset-access-time' '-a' in 'cpio'. 'reverse' '-r' in 'ls' and 'nm'. 'reversed-ed' '-f' in 'diff'. 'right-side-defs' '-R' in 'ptx'. 'same-order' '-s' in 'tar'. 'same-permissions' '-p' in 'tar'. 
'save' '-g' in 'stty'. 'se' Used in GDB. 'sentence-regexp' '-S' in 'ptx'. 'separate-dirs' '-S' in 'du'. 'separator' '-s' in 'tac'. 'sequence' Used by 'recode' to chose files or pipes for sequencing passes. 'shell' '-s' in 'su'. 'show-all' '-A' in 'cat'. 'show-c-function' '-p' in 'diff'. 'show-ends' '-E' in 'cat'. 'show-function-line' '-F' in 'diff'. 'show-tabs' '-T' in 'cat'. 'silent' Used in many programs to inhibit the usual output. Every program accepting '--silent' should accept '--quiet' as a synonym. 'size' '-s' in 'ls'. 'socket' Specify a file descriptor for a network server to use for its socket, instead of opening and binding a new socket. This provides a way to run, in a non-privileged process, a server that normally needs a reserved port number. 'sort' Used in 'ls'. 'source' '-W source' in 'gawk'. 'sparse' '-S' in 'tar'. 'speed-large-files' '-H' in 'diff'. 'split-at' '-E' in 'unshar'. 'split-size-limit' '-L' in 'shar'. 'squeeze-blank' '-s' in 'cat'. 'start-delete' '-w' in 'wdiff'. 'start-insert' '-y' in 'wdiff'. 'starting-file' Used in 'tar' and 'diff' to specify which file within a directory to start processing with. 'statistics' '-s' in 'wdiff'. 'stdin-file-list' '-S' in 'shar'. 'stop' '-S' in 'make'. 'strict' '-s' in 'recode'. 'strip' '-s' in 'install'. 'strip-all' '-s' in 'strip'. 'strip-debug' '-S' in 'strip'. 'submitter' '-s' in 'shar'. 'suffix' '-S' in 'cp', 'ln', 'mv'. 'suffix-format' '-b' in 'csplit'. 'sum' '-s' in 'gprof'. 'summarize' '-s' in 'du'. 'symbolic' '-s' in 'ln'. 'symbols' Used in GDB and 'objdump'. 'synclines' '-s' in 'm4'. 'sysname' '-s' in 'uname'. 'tabs' '-t' in 'expand' and 'unexpand'. 'tabsize' '-T' in 'ls'. 'terminal' '-T' in 'tput' and 'ul'. '-t' in 'wdiff'. 'text' '-a' in 'diff'. 'text-files' '-T' in 'shar'. 'time' Used in 'ls' and 'touch'. 'timeout' Specify how long to wait before giving up on some operation. 'to-stdout' '-O' in 'tar'. 'total' '-c' in 'du'. 'touch' '-t' in 'make', 'ranlib', and 'recode'. 
'trace' '-t' in 'm4'. 'traditional' '-t' in 'hello'; '-W traditional' in 'gawk'; '-G' in 'ed', 'm4', and 'ptx'. 'tty' Used in GDB. 'typedefs' '-t' in 'ctags'. 'typedefs-and-c++' '-T' in 'ctags'. 'typeset-mode' '-t' in 'ptx'. 'uncompress' '-z' in 'tar'. 'unconditional' '-u' in 'cpio'. 'undefine' '-U' in 'm4'. 'undefined-only' '-u' in 'nm'. 'update' '-u' in 'cp', 'ctags', 'mv', 'tar'. 'usage' Used in 'gawk'; same as '--help'. 'uuencode' '-B' in 'shar'. 'vanilla-operation' '-V' in 'shar'. 'verbose' Print more information about progress. Many programs support this. 'verify' '-W' in 'tar'. 'version' Print the version number. 'version-control' '-V' in 'cp', 'ln', 'mv'. 'vgrind' '-v' in 'ctags'. 'volume' '-V' in 'tar'. 'what-if' '-W' in 'make'. 'whole-size-limit' '-l' in 'shar'. 'width' '-w' in 'ls' and 'ptx'. 'word-regexp' '-W' in 'ptx'. 'writable' '-T' in 'who'. 'zeros' '-z' in 'gprof'.  File: standards.info, Node: OID Allocations, Next: Memory Usage, Prev: Option Table, Up: Program Behavior 4.11 OID Allocations ==================== The OID (object identifier) 1.3.6.1.4.1.11591 has been assigned to the GNU Project (thanks to Sergey Poznyakoff). These are used for SNMP, LDAP, X.509 certificates, and so on. The web site has a (voluntary) listing of many OID assignments. If you need a new slot for your GNU package, write . 
Here is a list of arcs currently assigned: 1.3.6.1.4.1.11591 GNU 1.3.6.1.4.1.11591.1 GNU Radius 1.3.6.1.4.1.11591.2 GnuPG 1.3.6.1.4.1.11591.2.1 notation 1.3.6.1.4.1.11591.2.1.1 pkaAddress 1.3.6.1.4.1.11591.3 GNU Radar 1.3.6.1.4.1.11591.4 GNU GSS 1.3.6.1.4.1.11591.5 GNU Mailutils 1.3.6.1.4.1.11591.6 GNU Shishi 1.3.6.1.4.1.11591.7 GNU Radio 1.3.6.1.4.1.11591.8 GNU Dico 1.3.6.1.4.1.11591.9 GNU Rush 1.3.6.1.4.1.11591.12 digestAlgorithm 1.3.6.1.4.1.11591.12.2 TIGER/192 1.3.6.1.4.1.11591.13 encryptionAlgorithm 1.3.6.1.4.1.11591.13.2 Serpent 1.3.6.1.4.1.11591.13.2.1 Serpent-128-ECB 1.3.6.1.4.1.11591.13.2.2 Serpent-128-CBC 1.3.6.1.4.1.11591.13.2.3 Serpent-128-OFB 1.3.6.1.4.1.11591.13.2.4 Serpent-128-CFB 1.3.6.1.4.1.11591.13.2.21 Serpent-192-ECB 1.3.6.1.4.1.11591.13.2.22 Serpent-192-CBC 1.3.6.1.4.1.11591.13.2.23 Serpent-192-OFB 1.3.6.1.4.1.11591.13.2.24 Serpent-192-CFB 1.3.6.1.4.1.11591.13.2.41 Serpent-256-ECB 1.3.6.1.4.1.11591.13.2.42 Serpent-256-CBC 1.3.6.1.4.1.11591.13.2.43 Serpent-256-OFB 1.3.6.1.4.1.11591.13.2.44 Serpent-256-CFB 1.3.6.1.4.1.11591.14 CRC algorithms 1.3.6.1.4.1.11591.14.1 CRC 32 1.3.6.1.4.1.11591.15 ellipticCurve 1.3.6.1.4.1.11591.15.1 Ed25519  File: standards.info, Node: Memory Usage, Next: File Usage, Prev: OID Allocations, Up: Program Behavior 4.12 Memory Usage ================= If a program typically uses just a few meg of memory, don't bother making any effort to reduce memory usage. For example, if it is impractical for other reasons to operate on files more than a few meg long, it is reasonable to read entire input files into memory to operate on them. However, for programs such as 'cat' or 'tail', that can usefully operate on very large files, it is important to avoid using a technique that would artificially limit the size of files it can handle. 
If a program works by lines and could be applied to arbitrary user-supplied input files, it should keep only a line in memory, because this is not very hard and users will want to be able to operate on input files that are bigger than will fit in memory all at once. If your program creates complicated data structures, just make them in memory and give a fatal error if 'malloc' returns 'NULL'. Memory analysis tools such as 'valgrind' can be useful, but don't complicate a program merely to avoid their false alarms. For example, if memory is used until just before a process exits, don't free it simply to silence such a tool.  File: standards.info, Node: File Usage, Prev: Memory Usage, Up: Program Behavior 4.13 File Usage =============== Programs should be prepared to operate when '/usr' and '/etc' are read-only file systems. Thus, if the program manages log files, lock files, backup files, score files, or any other files which are modified for internal purposes, these files should not be stored in '/usr' or '/etc'. There are two exceptions. '/etc' is used to store system configuration information; it is reasonable for a program to modify files in '/etc' when its job is to update the system configuration. Also, if the user explicitly asks to modify one file in a directory, it is reasonable for the program to store other files in the same directory.  File: standards.info, Node: Writing C, Next: Documentation, Prev: Program Behavior, Up: Top 5 Making The Best Use of C ************************** This chapter provides advice on how best to use the C language when writing GNU software. * Menu: * Formatting:: Formatting your source code. * Comments:: Commenting your work. * Syntactic Conventions:: Clean use of C constructs. * Names:: Naming variables, functions, and files. * System Portability:: Portability among different operating systems. * CPU Portability:: Supporting the range of CPU types. * System Functions:: Portability and "standard" library functions. 
* Internationalization:: Techniques for internationalization. * Character Set:: Use ASCII by default. * Quote Characters:: Use "..." or '...' in the C locale. * Mmap:: How you can safely use 'mmap'.  File: standards.info, Node: Formatting, Next: Comments, Up: Writing C 5.1 Formatting Your Source Code =============================== Please keep the length of source lines to 79 characters or less, for maximum readability in the widest range of environments. It is important to put the open-brace that starts the body of a C function in column one, so that they will start a defun. Several tools look for open-braces in column one to find the beginnings of C functions. These tools will not work on code not formatted that way. Avoid putting open-brace, open-parenthesis or open-bracket in column one when they are inside a function, so that they won't start a defun. The open-brace that starts a 'struct' body can go in column one if you find it useful to treat that definition as a defun. It is also important for function definitions to start the name of the function in column one. This helps people to search for function definitions, and may also help certain tools recognize them. Thus, using Standard C syntax, the format is this: static char * concat (char *s1, char *s2) { ... } or, if you want to use traditional C syntax, format the definition like this: static char * concat (s1, s2) /* Name starts in column one here */ char *s1, *s2; { /* Open brace in column one here */ ... } In Standard C, if the arguments don't fit nicely on one line, split it like this: int lots_of_args (int an_integer, long a_long, short a_short, double a_double, float a_float) ... 
For 'struct' and 'enum' types, likewise put the braces in column one, unless the whole contents fits on one line: struct foo { int a, b; } or struct foo { int a, b; } The rest of this section gives our recommendations for other aspects of C formatting style, which is also the default style of the 'indent' program in version 1.2 and newer. It corresponds to the options -nbad -bap -nbc -bbo -bl -bli2 -bls -ncdb -nce -cp1 -cs -di2 -ndj -nfc1 -nfca -hnl -i2 -ip5 -lp -pcs -psl -nsc -nsob We don't think of these recommendations as requirements, because it causes no problems for users if two different programs have different formatting styles. But whatever style you use, please use it consistently, since a mixture of styles within one program tends to look ugly. If you are contributing changes to an existing program, please follow the style of that program. For the body of the function, our recommended style looks like this: if (x < foo (y, z)) haha = bar[4] + 5; else { while (z) { haha += foo (z, z); z--; } return ++x + bar (); } We find it easier to read a program when it has spaces before the open-parentheses and after the commas. Especially after the commas. When you split an expression into multiple lines, split it before an operator, not after one. Here is the right way: if (foo_this_is_long && bar > win (x, y, z) && remaining_condition) Try to avoid having two operators of different precedence at the same level of indentation. For example, don't write this: mode = (inmode[j] == VOIDmode || GET_MODE_SIZE (outmode[j]) > GET_MODE_SIZE (inmode[j]) ? outmode[j] : inmode[j]); Instead, use extra parentheses so that the indentation shows the nesting: mode = ((inmode[j] == VOIDmode || (GET_MODE_SIZE (outmode[j]) > GET_MODE_SIZE (inmode[j]))) ? outmode[j] : inmode[j]); Insert extra parentheses so that Emacs will indent the code properly. 
For example, the following indentation looks nice if you do it by hand, v = rup->ru_utime.tv_sec*1000 + rup->ru_utime.tv_usec/1000 + rup->ru_stime.tv_sec*1000 + rup->ru_stime.tv_usec/1000; but Emacs would alter it. Adding a set of parentheses produces something that looks equally nice, and which Emacs will preserve: v = (rup->ru_utime.tv_sec*1000 + rup->ru_utime.tv_usec/1000 + rup->ru_stime.tv_sec*1000 + rup->ru_stime.tv_usec/1000); Format do-while statements like this: do { a = foo (a); } while (a > 0); Please use formfeed characters (control-L) to divide the program into pages at logical places (but not within a function). It does not matter just how long the pages are, since they do not have to fit on a printed page. The formfeeds should appear alone on lines by themselves.  File: standards.info, Node: Comments, Next: Syntactic Conventions, Prev: Formatting, Up: Writing C 5.2 Commenting Your Work ======================== Every program should start with a comment saying briefly what it is for. Example: 'fmt - filter for simple filling of text'. This comment should be at the top of the source file containing the 'main' function of the program. Also, please write a brief comment at the start of each source file, with the file name and a line or two about the overall purpose of the file. Please write the comments in a GNU program in English, because English is the one language that nearly all programmers in all countries can read. If you do not write English well, please write comments in English as well as you can, then ask other people to help rewrite them. If you can't write comments in English, please find someone to work with you and translate your comments into English. Please put a comment on each function saying what the function does, what sorts of arguments it gets, and what the possible values of arguments mean and are used for. 
It is not necessary to duplicate in words the meaning of the C argument declarations, if a C type is being used in its customary fashion. If there is anything nonstandard about its use (such as an argument of type 'char *' which is really the address of the second character of a string, not the first), or any possible values that would not work the way one would expect (such as, that strings containing newlines are not guaranteed to work), be sure to say so. Also explain the significance of the return value, if there is one. Please put two spaces after the end of a sentence in your comments, so that the Emacs sentence commands will work. Also, please write complete sentences and capitalize the first word. If a lower-case identifier comes at the beginning of a sentence, don't capitalize it! Changing the spelling makes it a different identifier. If you don't like starting a sentence with a lower case letter, write the sentence differently (e.g., "The identifier lower-case is ..."). The comment on a function is much clearer if you use the argument names to speak about the argument values. The variable name itself should be lower case, but write it in upper case when you are speaking about the value rather than the variable itself. Thus, "the inode number NODE_NUM" rather than "an inode". There is usually no purpose in restating the name of the function in the comment before it, because readers can see that for themselves. There might be an exception when the comment is so long that the function itself would be off the bottom of the screen. There should be a comment on each static variable as well, like this: /* Nonzero means truncate lines in the display; zero means continue them. */ int truncate_lines; Every '#endif' should have a comment, except in the case of short conditionals (just a few lines) that are not nested. The comment should state the condition of the conditional that is ending, _including its sense_. 
'#else' should have a comment describing the condition _and sense_ of the code that follows. For example: #ifdef foo ... #else /* not foo */ ... #endif /* not foo */ #ifdef foo ... #endif /* foo */ but, by contrast, write the comments this way for a '#ifndef': #ifndef foo ... #else /* foo */ ... #endif /* foo */ #ifndef foo ... #endif /* not foo */  File: standards.info, Node: Syntactic Conventions, Next: Names, Prev: Comments, Up: Writing C 5.3 Clean Use of C Constructs ============================= Please explicitly declare the types of all objects. For example, you should explicitly declare all arguments to functions, and you should declare functions to return 'int' rather than omitting the 'int'. Some programmers like to use the GCC '-Wall' option, and change the code whenever it issues a warning. If you want to do this, then do. Other programmers prefer not to use '-Wall', because it gives warnings for valid and legitimate code which they do not want to change. If you want to do this, then do. The compiler should be your servant, not your master. Don't make the program ugly just to placate static analysis tools such as 'lint', 'clang', and GCC with extra warnings options such as '-Wconversion' and '-Wundef'. These tools can help find bugs and unclear code, but they can also generate so many false alarms that it hurts readability to silence them with unnecessary casts, wrappers, and other complications. For example, please don't insert casts to 'void' or calls to do-nothing functions merely to pacify a lint checker. Declarations of external functions and functions to appear later in the source file should all go in one place near the beginning of the file (somewhere before the first function definition in the file), or else should go in a header file. Don't put 'extern' declarations inside functions. It used to be common practice to use the same local variables (with names like 'tem') over and over for different values within one function. 
Instead of doing this, it is better to declare a separate local variable for each distinct purpose, and give it a name which is meaningful. This not only makes programs easier to understand, it also facilitates optimization by good compilers. You can also move the declaration of each local variable into the smallest scope that includes all its uses. This makes the program even cleaner. Don't use local variables or parameters that shadow global identifiers. GCC's '-Wshadow' option can detect this problem. Don't declare multiple variables in one declaration that spans lines. Start a new declaration on each line, instead. For example, instead of this:

     int    foo,
            bar;

write either this:

     int foo, bar;

or this:

     int foo;
     int bar;

(If they are global variables, each should have a comment preceding it anyway.) When you have an 'if'-'else' statement nested in another 'if' statement, always put braces around the 'if'-'else'. Thus, never write like this: if (foo) if (bar) win (); else lose (); always like this: if (foo) { if (bar) win (); else lose (); } If you have an 'if' statement nested inside of an 'else' statement, either write 'else if' on one line, like this, if (foo) ... else if (bar) ... with its 'then'-part indented like the preceding 'then'-part, or write the nested 'if' within braces like this: if (foo) ... else { if (bar) ... } Don't declare both a structure tag and variables or typedefs in the same declaration. Instead, declare the structure tag separately and then use it to declare the variables or typedefs. Try to avoid assignments inside 'if'-conditions (assignments inside 'while'-conditions are ok). 
For example, don't write this: if ((foo = (char *) malloc (sizeof *foo)) == NULL) fatal ("virtual memory exhausted"); instead, write this: foo = (char *) malloc (sizeof *foo); if (foo == NULL) fatal ("virtual memory exhausted");  File: standards.info, Node: Names, Next: System Portability, Prev: Syntactic Conventions, Up: Writing C 5.4 Naming Variables, Functions, and Files ========================================== The names of global variables and functions in a program serve as comments of a sort. So don't choose terse names--instead, look for names that give useful information about the meaning of the variable or function. In a GNU program, names should be English, like other comments. Local variable names can be shorter, because they are used only within one context, where (presumably) comments explain their purpose. Try to limit your use of abbreviations in symbol names. It is ok to make a few abbreviations, explain what they mean, and then use them frequently, but don't use lots of obscure abbreviations. Please use underscores to separate words in a name, so that the Emacs word commands can be useful within them. Stick to lower case; reserve upper case for macros and 'enum' constants, and for name-prefixes that follow a uniform convention. For example, you should use names like 'ignore_space_change_flag'; don't use names like 'iCantReadThis'. Variables that indicate whether command-line options have been specified should be named after the meaning of the option, not after the option-letter. A comment should state both the exact meaning of the option and its letter. For example, /* Ignore changes in horizontal whitespace (-b). */ int ignore_space_change_flag; When you want to define names with constant integer values, use 'enum' rather than '#define'. GDB knows about enumeration constants. You might want to make sure that none of the file names would conflict if the files were loaded onto an MS-DOS file system which shortens the names. 
You can use the program 'doschk' to test for this. Some GNU programs were designed to limit themselves to file names of 14 characters or less, to avoid file name conflicts if they are read into older System V systems. Please preserve this feature in the existing GNU programs that have it, but there is no need to do this in new GNU programs. 'doschk' also reports file names longer than 14 characters.  File: standards.info, Node: System Portability, Next: CPU Portability, Prev: Names, Up: Writing C 5.5 Portability between System Types ==================================== In the Unix world, "portability" refers to porting to different Unix versions. For a GNU program, this kind of portability is desirable, but not paramount. The primary purpose of GNU software is to run on top of the GNU kernel, compiled with the GNU C compiler, on various types of CPU. So the kinds of portability that are absolutely necessary are quite limited. But it is important to support Linux-based GNU systems, since they are the form of GNU that is popular. Beyond that, it is good to support the other free operating systems (*BSD), and it is nice to support other Unix-like systems if you want to. Supporting a variety of Unix-like systems is desirable, although not paramount. It is usually not too hard, so you may as well do it. But you don't have to consider it an obligation, if it does turn out to be hard. The easiest way to achieve portability to most Unix-like systems is to use Autoconf. It's unlikely that your program needs to know more information about the host platform than Autoconf can provide, simply because most of the programs that need such knowledge have already been written. Avoid using the format of semi-internal data bases (e.g., directories) when there is a higher-level alternative ('readdir'). As for systems that are not like Unix, such as MS-DOS, Windows, VMS, MVS, and older Macintosh systems, supporting them is often a lot of work. 
When that is the case, it is better to spend your time adding features that will be useful on GNU and GNU/Linux, rather than on supporting other incompatible systems. If you do support Windows, please do not abbreviate it as "win". *Note Trademarks::. Usually we write the name "Windows" in full, but when brevity is very important (as in file names and some symbol names), we abbreviate it to "w". In GNU Emacs, for instance, we use 'w32' in file names of Windows-specific files, but the macro for Windows conditionals is called 'WINDOWSNT'. In principle there could also be 'w64'. It is a good idea to define the "feature test macro" '_GNU_SOURCE' when compiling your C files. When you compile on GNU or GNU/Linux, this will enable the declarations of GNU library extension functions, and that will usually give you a compiler error message if you define the same function names in some other way in your program. (You don't have to actually _use_ these functions, if you prefer to make the program more portable to other systems.) But whether or not you use these GNU extensions, you should avoid using their names for any other meanings. Doing so would make it hard to move your code into other GNU programs.  File: standards.info, Node: CPU Portability, Next: System Functions, Prev: System Portability, Up: Writing C 5.6 Portability between CPUs ============================ Even GNU systems will differ because of differences among CPU types--for example, difference in byte ordering and alignment requirements. It is absolutely essential to handle these differences. However, don't make any effort to cater to the possibility that an 'int' will be less than 32 bits. We don't support 16-bit machines in GNU. You need not cater to the possibility that 'long' will be smaller than pointers and 'size_t'. We know of one such platform: 64-bit programs on Microsoft Windows. 
If you care about making your package run on Windows using Mingw64, you would need to deal with 8-byte pointers and 4-byte 'long', which would break this code: printf ("size = %lu\n", (unsigned long) sizeof array); printf ("diff = %ld\n", (long) (pointer2 - pointer1)); Whether to support Mingw64, and Windows in general, in your package is your choice. The GNU Project doesn't say you have any responsibility to do so. Our goal is to replace proprietary systems, including Windows, not to enhance them. If people pressure you to make your program run on Windows, and you are not interested, you can respond with, "Switch to GNU/Linux -- your freedom depends on it." Predefined file-size types like 'off_t' are an exception: they are longer than 'long' on many platforms, so code like the above won't work with them. One way to print an 'off_t' value portably is to print its digits yourself, one by one. Don't assume that the address of an 'int' object is also the address of its least-significant byte. This is false on big-endian machines. Thus, don't make the following mistake: int c; ... while ((c = getchar ()) != EOF) write (file_descriptor, &c, 1); Instead, use 'unsigned char' as follows. (The 'unsigned' is for portability to unusual systems where 'char' is signed and where there is integer overflow checking.) int c; while ((c = getchar ()) != EOF) { unsigned char u = c; write (file_descriptor, &u, 1); } Avoid casting pointers to integers if you can. Such casts greatly reduce portability, and in most programs they are easy to avoid. In the cases where casting pointers to integers is essential--such as, a Lisp interpreter which stores type information as well as an address in one word--you'll have to make explicit provisions to handle different word sizes. You will also need to make provision for systems in which the normal range of addresses you can get from 'malloc' starts far away from zero.  
File: standards.info, Node: System Functions, Next: Internationalization, Prev: CPU Portability, Up: Writing C 5.7 Calling System Functions ============================ Historically, C implementations differed substantially, and many systems lacked a full implementation of ANSI/ISO C89. Nowadays, however, all practical systems have a C89 compiler and GNU C supports almost all of C99 and some of C11. Similarly, most systems implement POSIX.1-2001 libraries and tools, and many have POSIX.1-2008. Hence, there is little reason to support old C or non-POSIX systems, and you may want to take advantage of standard C and POSIX to write clearer, more portable, or faster code. You should use standard interfaces where possible; but if GNU extensions make your program more maintainable, powerful, or otherwise better, don't hesitate to use them. In any case, don't make your own declaration of system functions; that's a recipe for conflict. Despite the standards, nearly every library function has some sort of portability issue on some system or another. Here are some examples: 'open' Names with trailing '/''s are mishandled on many platforms. 'printf' 'long double' may be unimplemented; floating values Infinity and NaN are often mishandled; output for large precisions may be incorrect. 'readlink' May return 'int' instead of 'ssize_t'. 'scanf' On Windows, 'errno' is not set on failure. Gnulib (https://www.gnu.org/software/gnulib/) is a big help in this regard. Gnulib provides implementations of standard interfaces on many of the systems that lack them, including portable implementations of enhanced GNU interfaces, thereby making their use portable, and of POSIX.1-2008 interfaces, some of which are missing even on up-to-date GNU systems. 
Gnulib also provides many useful non-standard interfaces; for example, C implementations of standard data structures (hash tables, binary trees), error-checking type-safe wrappers for memory allocation functions ('xmalloc', 'xrealloc'), and output of error messages. Gnulib integrates with GNU Autoconf and Automake to remove much of the burden of writing portable code from the programmer: Gnulib makes your configure script automatically determine what features are missing and use the Gnulib code to supply the missing pieces. The Gnulib and Autoconf manuals have extensive sections on portability: *note Introduction: (gnulib)Top. and *note (autoconf)Portable C and C++::. Please consult them for many more details.  File: standards.info, Node: Internationalization, Next: Character Set, Prev: System Functions, Up: Writing C 5.8 Internationalization ======================== GNU has a library called GNU gettext that makes it easy to translate the messages in a program into various languages. You should use this library in every program. Use English for the messages as they appear in the program, and let gettext provide the way to translate them into other languages. Using GNU gettext involves putting a call to the 'gettext' macro around each string that might need translation--like this: printf (gettext ("Processing file '%s'..."), file); This permits GNU gettext to replace the string '"Processing file '%s'..."' with a translated version. Once a program uses gettext, please make a point of writing calls to 'gettext' when you add new strings that call for translation. Using GNU gettext in a package involves specifying a "text domain name" for the package. The text domain name is used to separate the translations for this package from the translations for other packages. Normally, the text domain name should be the same as the name of the package--for example, 'coreutils' for the GNU core utilities. 
To enable gettext to work well, avoid writing code that makes assumptions about the structure of words or sentences. When you want the precise text of a sentence to vary depending on the data, use two or more alternative string constants each containing a complete sentence, rather than inserting conditionalized words or phrases into a single sentence framework. Here is an example of what not to do: printf ("%s is full", capacity > 5000000 ? "disk" : "floppy disk"); If you apply gettext to all strings, like this, printf (gettext ("%s is full"), capacity > 5000000 ? gettext ("disk") : gettext ("floppy disk")); the translator will hardly know that "disk" and "floppy disk" are meant to be substituted in the other string. Worse, in some languages (like French) the construction will not work: the translation of the word "full" depends on the gender of the first part of the sentence; it happens to be not the same for "disk" as for "floppy disk". Complete sentences can be translated without problems: printf (capacity > 5000000 ? gettext ("disk is full") : gettext ("floppy disk is full")); A similar problem appears at the level of sentence structure with this code: printf ("# Implicit rule search has%s been done.\n", f->tried_implicit ? "" : " not"); Adding 'gettext' calls to this code cannot give correct results for all languages, because negation in some languages requires adding words at more than one place in the sentence. By contrast, adding 'gettext' calls does the job straightforwardly if the code starts out like this: printf (f->tried_implicit ? "# Implicit rule search has been done.\n" : "# Implicit rule search has not been done.\n"); Another example is this one: printf ("%d file%s processed", nfiles, nfiles != 1 ? "s" : ""); The problem with this example is that it assumes that plurals are made by adding 's'. If you apply gettext to the format string, like this, printf (gettext ("%d file%s processed"), nfiles, nfiles != 1 ? 
"s" : ""); the message can use different words, but it will still be forced to use 's' for the plural. Here is a better way, with gettext being applied to the two strings independently: printf ((nfiles != 1 ? gettext ("%d files processed") : gettext ("%d file processed")), nfiles); But this still doesn't work for languages like Polish, which has three plural forms: one for nfiles == 1, one for nfiles == 2, 3, 4, 22, 23, 24, ... and one for the rest. The GNU 'ngettext' function solves this problem: printf (ngettext ("%d file processed", "%d files processed", nfiles), nfiles);  File: standards.info, Node: Character Set, Next: Quote Characters, Prev: Internationalization, Up: Writing C 5.9 Character Set ================= Sticking to the ASCII character set (plain text, 7-bit characters) is preferred in GNU source code comments, text documents, and other contexts, unless there is good reason to do something else because of the application domain. For example, if source code deals with the French Revolutionary calendar, it is OK if its literal strings contain accented characters in month names like "Floréal". Also, it is OK (but not required) to use non-ASCII characters to represent proper names of contributors in change logs (*note Change Logs::). If you need to use non-ASCII characters, you should normally stick with one encoding, certainly within a single file. UTF-8 is likely to be the best choice.  File: standards.info, Node: Quote Characters, Next: Mmap, Prev: Character Set, Up: Writing C 5.10 Quote Characters ===================== In the C locale, the output of GNU programs should stick to plain ASCII for quotation characters in messages to users: preferably 0x22 ('"') or 0x27 (''') for both opening and closing quotes. Although GNU programs traditionally used 0x60 ('`') for opening and 0x27 (''') for closing quotes, nowadays quotes '`like this'' are typically rendered asymmetrically, so quoting '"like this"' or ''like this'' typically looks better. 
It is ok, but not required, for GNU programs to generate locale-specific quotes in non-C locales. For example: printf (gettext ("Processing file '%s'..."), file); Here, a French translation might cause 'gettext' to return the string '"Traitement de fichier « %s »..."', yielding quotes more appropriate for a French locale. Sometimes a program may need to use opening and closing quotes directly. By convention, 'gettext' translates the string '"`"' to the opening quote and the string '"'"' to the closing quote, and a program can use these translations. Generally, though, it is better to translate quote characters in the context of longer strings. If the output of your program is ever likely to be parsed by another program, it is good to provide an option that makes this parsing reliable. For example, you could escape special characters using conventions from the C language or the Bourne shell. See for example the option '--quoting-style' of GNU 'ls'.  File: standards.info, Node: Mmap, Prev: Quote Characters, Up: Writing C 5.11 Mmap ========= If you use 'mmap' to read or write files, don't assume it either works on all files or fails for all files. It may work on some files and fail on others. The proper way to use 'mmap' is to try it on the specific file for which you want to use it--and if 'mmap' doesn't work, fall back on doing the job in another way using 'read' and 'write'. The reason this precaution is needed is that the GNU kernel (the HURD) provides a user-extensible file system, in which there can be many different kinds of "ordinary files". Many of them support 'mmap', but some do not. It is important to make programs handle all these kinds of files.  File: standards.info, Node: Documentation, Next: Managing Releases, Prev: Writing C, Up: Top 6 Documenting Programs ********************** A GNU program should ideally come with full free documentation, adequate for both reference and tutorial purposes. 
If the package can be programmed or extended, the documentation should cover programming or extending it, as well as just using it. * Menu: * GNU Manuals:: Writing proper manuals. * Doc Strings and Manuals:: Compiling doc strings doesn't make a manual. * Manual Structure Details:: Specific structure conventions. * License for Manuals:: Writing the distribution terms for a manual. * Manual Credits:: Giving credit to documentation contributors. * Printed Manuals:: Mentioning the printed manual. * NEWS File:: NEWS files supplement manuals. * Change Logs:: Recording changes. * Man Pages:: Man pages are secondary. * Reading other Manuals:: How far you can go in learning from other manuals.  File: standards.info, Node: GNU Manuals, Next: Doc Strings and Manuals, Up: Documentation 6.1 GNU Manuals =============== The preferred document format for the GNU system is the Texinfo formatting language. Every GNU package should (ideally) have documentation in Texinfo both for reference and for learners. Texinfo makes it possible to produce a good quality formatted book, using TeX, and to generate an Info file. It is also possible to generate HTML output from Texinfo source. See the Texinfo manual, either the hardcopy, or the on-line version available through 'info' or the Emacs Info subsystem ('C-h i'). Nowadays some other formats such as Docbook and Sgmltexi can be converted automatically into Texinfo. It is ok to produce the Texinfo documentation by conversion this way, as long as it gives good results. Make sure your manual is clear to a reader who knows nothing about the topic and reads it straight through. This means covering basic topics at the beginning, and advanced topics only later. This also means defining every specialized term when it is first used. Remember that the audience for a GNU manual (and other GNU documentation) is global, and that it will be used for years, maybe decades. This means that the reader could have very different cultural reference points. 
Decades from now, all but old folks will have very different cultural reference points; many things that "everyone knows about" today may be mostly forgotten. For this reason, try to avoid writing in a way that depends on cultural reference points for proper understanding, or that refers to them in ways that would impede reading for someone that doesn't recognize them. Likewise, be conservative in your choice of words (aside from technical terms), linguistic constructs, and spelling: aim to make them intelligible to readers from ten years ago. In any contest for trendiness, GNU writing should not even qualify to enter. It is ok to refer once in a rare while to spatially or temporally localized reference points or facts, if it is directly pertinent or as an aside. Changing these few things (which in any case stand out) when they no longer make sense will not be a lot of work. By contrast, it is always proper to refer to concepts of GNU and the free software movement, when they are pertinent. These are a central part of our message, so we should take advantage of opportunities to mention them. They are fundamental moral positions, so they will rarely if ever change. Programmers tend to carry over the structure of the program as the structure for its documentation. But this structure is not necessarily good for explaining how to use the program; it may be irrelevant and confusing for a user. Instead, the right way to structure documentation is according to the concepts and questions that a user will have in mind when reading it. This principle applies at every level, from the lowest (ordering sentences in a paragraph) to the highest (ordering of chapter topics within the manual). Sometimes this structure of ideas matches the structure of the implementation of the software being documented--but often they are different. 
An important part of learning to write good documentation is to learn to notice when you have unthinkingly structured the documentation like the implementation, stop yourself, and look for better alternatives. For example, each program in the GNU system probably ought to be documented in one manual; but this does not mean each program should have its own manual. That would be following the structure of the implementation, rather than the structure that helps the user understand. Instead, each manual should cover a coherent _topic_. For example, instead of a manual for 'diff' and a manual for 'diff3', we have one manual for "comparison of files" which covers both of those programs, as well as 'cmp'. By documenting these programs together, we can make the whole subject clearer. The manual which discusses a program should certainly document all of the program's command-line options and all of its commands. It should give examples of their use. But don't organize the manual as a list of features. Instead, organize it logically, by subtopics. Address the questions that a user will ask when thinking about the job that the program does. Don't just tell the reader what each feature can do--say what jobs it is good for, and show how to use it for those jobs. Explain what is recommended usage, and what kinds of usage users should avoid. In general, a GNU manual should serve both as tutorial and reference. It should be set up for convenient access to each topic through Info, and for reading straight through (appendixes aside). A GNU manual should give a good introduction to a beginner reading through from the start, and should also provide all the details that hackers want. The Bison manual is a good example of this--please take a look at it to see what we mean. That is not as hard as it first sounds. Arrange each chapter as a logical breakdown of its topic, but order the sections, and write their text, so that reading the chapter straight through makes sense. 
Do likewise when structuring the book into chapters, and when structuring a section into paragraphs. The watchword is, _at each point, address the most fundamental and important issue raised by the preceding text._ If necessary, add extra chapters at the beginning of the manual which are purely tutorial and cover the basics of the subject. These provide the framework for a beginner to understand the rest of the manual. The Bison manual provides a good example of how to do this. To serve as a reference, a manual should have an Index that lists all the functions, variables, options, and important concepts that are part of the program. One combined Index should do for a short manual, but sometimes for a complex package it is better to use multiple indices. The Texinfo manual includes advice on preparing good index entries, see *note Making Index Entries: (texinfo)Index Entries, and see *note Defining the Entries of an Index: (texinfo)Indexing Commands. Don't use Unix man pages as a model for how to write GNU documentation; most of them are terse, badly structured, and give inadequate explanation of the underlying concepts. (There are, of course, some exceptions.) Also, Unix man pages use a particular format which is different from what we use in GNU manuals. Please include an email address in the manual for where to report bugs _in the text of the manual_. Please do not use the term "pathname" that is used in Unix documentation; use "file name" (two words) instead. We use the term "path" only for search paths, which are lists of directory names. Please do not use the term "illegal" to refer to erroneous input to a computer program. Please use "invalid" for this, and reserve the term "illegal" for activities prohibited by law. Please do not write '()' after a function name just to indicate it is a function. 'foo ()' is not a function, it is a function call with no arguments. 
Whenever possible, please stick to the active voice, avoiding the passive, and use the present tense, not the future tense. For instance, write "The function 'foo' returns a list containing A and B" rather than "A list containing A and B will be returned." One advantage of the active voice is it requires you to state the subject of the sentence; with the passive voice, you might omit the subject, which leads to vagueness. It is proper to use the future tense when grammar demands it, as in, "If you type 'x', the computer will self-destruct in 10 seconds."  File: standards.info, Node: Doc Strings and Manuals, Next: Manual Structure Details, Prev: GNU Manuals, Up: Documentation 6.2 Doc Strings and Manuals =========================== Some programming systems, such as Emacs, provide a documentation string for each function, command or variable. You may be tempted to write a reference manual by compiling the documentation strings and writing a little additional text to go around them--but you must not do it. That approach is a fundamental mistake. The text of well-written documentation strings will be entirely wrong for a manual. A documentation string needs to stand alone--when it appears on the screen, there will be no other text to introduce or explain it. Meanwhile, it can be rather informal in style. The text describing a function or variable in a manual must not stand alone; it appears in the context of a section or subsection. Other text at the beginning of the section should explain some of the concepts, and should often make some general points that apply to several functions or variables. The previous descriptions of functions and variables in the section will also have given information about the topic. A description written to stand alone would repeat some of that information; this redundancy looks bad. Meanwhile, the informality that is acceptable in a documentation string is totally unacceptable in a manual. 
The only good way to use documentation strings in writing a good manual is to use them as a source of information for writing good text.  File: standards.info, Node: Manual Structure Details, Next: License for Manuals, Prev: Doc Strings and Manuals, Up: Documentation 6.3 Manual Structure Details ============================ The title page of the manual should state the version of the programs or packages documented in the manual. The Top node of the manual should also contain this information. If the manual is changing more frequently than or independent of the program, also state a version number for the manual in both of these places. Each program documented in the manual should have a node named 'PROGRAM Invocation' or 'Invoking PROGRAM'. This node (together with its subnodes, if any) should describe the program's command line arguments and how to run it (the sort of information people would look for in a man page). Start with an '@example' containing a template for all the options and arguments that the program uses. Alternatively, put a menu item in some menu whose item name fits one of the above patterns. This identifies the node which that item points to as the node for this purpose, regardless of the node's actual name. The '--usage' feature of the Info reader looks for such a node or menu item in order to find the relevant text, so it is essential for every Texinfo file to have one. If one manual describes several programs, it should have such a node for each program described in the manual.  File: standards.info, Node: License for Manuals, Next: Manual Credits, Prev: Manual Structure Details, Up: Documentation 6.4 License for Manuals ======================= Please use the GNU Free Documentation License for all GNU manuals that are more than a few pages long. Likewise for a collection of short documents--you only need one copy of the GNU FDL for the whole collection. 
For a single short document, you can use a very permissive non-copyleft license, to avoid taking up space with a long license. See <https://www.gnu.org/licenses/fdl-howto.html> for more explanation of how to employ the GFDL. Note that it is not obligatory to include a copy of the GNU GPL or GNU LGPL in a manual whose license is neither the GPL nor the LGPL. It can be a good idea to include the program's license in a large manual; in a short manual, whose size would be increased considerably by including the program's license, it is probably better not to include it.  File: standards.info, Node: Manual Credits, Next: Printed Manuals, Prev: License for Manuals, Up: Documentation 6.5 Manual Credits ================== Please credit the principal human writers of the manual as the authors, on the title page of the manual. If a company sponsored the work, thank the company in a suitable place in the manual, but do not cite the company as an author.  File: standards.info, Node: Printed Manuals, Next: NEWS File, Prev: Manual Credits, Up: Documentation 6.6 Printed Manuals =================== The FSF publishes some GNU manuals in printed form. To encourage sales of these manuals, the on-line versions of the manual should mention at the very start that the printed manual is available and should point at information for getting it--for instance, with a link to the page <https://www.gnu.org/order/order.html>. This should not be included in the printed manual, though, because there it is redundant. It is also useful to explain in the on-line forms of the manual how the user can print out the manual from the sources.  File: standards.info, Node: NEWS File, Next: Change Logs, Prev: Printed Manuals, Up: Documentation 6.7 The NEWS File ================= In addition to its manual, the package should have a file named 'NEWS' which contains a list of user-visible changes worth mentioning. In each new release, add items to the front of the file and identify the version they pertain to. Don't discard old items; leave them in the file after the newer items. 
This way, a user upgrading from any previous version can see what is new. If the 'NEWS' file gets very long, move some of the older items into a file named 'ONEWS' and put a note at the end referring the user to that file.  File: standards.info, Node: Change Logs, Next: Man Pages, Prev: NEWS File, Up: Documentation 6.8 Change Logs =============== Keep a change log to describe all the changes made to program source files. The purpose of this is so that people investigating bugs in the future will know about the changes that might have introduced the bug. Often a new bug can be found by looking at what was recently changed. More importantly, change logs can help you eliminate conceptual inconsistencies between different parts of a program, by giving you a history of how the conflicting concepts arose, who they came from, and why the conflicting changes were made. Therefore, change logs should be detailed enough and accurate enough to provide the information commonly required for such "software forensics". Specifically, change logs should make finding answers to the following questions easy: * What changes affected a particular source file? * Was a particular source file renamed or moved, and if so, as part of what change? * What changes affected a given function or macro or definition of a data structure? * Was a function (or a macro or the definition of a data structure) renamed or moved from another file, and if so, as part of which change? * What changes deleted a function (or macro or data structure)? * What was the rationale for a given change, and what were its main ideas? * Is there any additional information regarding the change, and if so, where can it be found? Historically, change logs were maintained on specially formatted files. Nowadays, projects commonly keep their source files under a "version control system" (VCS), such as Git, Subversion, or Mercurial. 
If the VCS repository is publicly accessible, and changes are committed to it separately (one commit for each logical changeset) and record the authors of each change, then the information recorded by the VCS can be used to produce the change logs out of VCS logs, and to answer the above questions by using the suitable VCS commands. (However, the VCS log messages still need to provide some supporting information, as described below.) Projects that maintain such VCS repositories can decide not to maintain separate change log files, and instead rely on the VCS to keep the change logs. If you decide not to maintain separate change log files, you should still consider providing them in the release tarballs, for the benefit of users who'd like to review the change logs without accessing the project's VCS repository. Scripts exist that can produce 'ChangeLog' files from the VCS logs; for example, the 'gitlog-to-changelog' script, which is part of Gnulib, can do that for Git repositories. In Emacs, the command 'C-x v a' ('vc-update-change-log') does the job of incrementally updating a 'ChangeLog' file from the VCS logs. If separate change log files _are_ maintained, they are normally called 'ChangeLog', and each such file covers an entire directory. Each directory can have its own change log file, or a directory can use the change log of its parent directory--it's up to you. * Menu: * Change Log Concepts:: * Style of Change Logs:: * Simple Changes:: * Conditional Changes:: * Indicating the Part Changed::  File: standards.info, Node: Change Log Concepts, Next: Style of Change Logs, Up: Change Logs 6.8.1 Change Log Concepts and Conventions ----------------------------------------- You can think of the change log as a conceptual "undo list" which states how earlier versions were different from the current version. People can see the current version; they don't need the change log to tell them what is in it. 
What they want from a change log is a clear explanation of how the earlier version differed. Each "entry" in a change log describes either an individual change or the smallest batch of changes that belong together, also known as a "changeset". It is a good idea to start the change log entry with a "header line": a single line that is a complete sentence which summarizes the changeset. If you keep the change log in a VCS, this should be a requirement, as VCS commands that show the change log in abbreviated form, such as 'git log --oneline', treat the header line specially. (In a 'ChangeLog' file, the header line follows a line that says who was the author of the change and when it was installed.) Follow the change log entry's header line with a description of the overall change. This should be as long as needed to give a clear description. Pay special attention to aspects of the changeset not easily gleaned from the diffs or from the names of modified files and functions: the overall idea of the change and the need for it, and the relations, if any, between changes made to different files/functions. If the change or its reasons were discussed on some public forum, such as the project's issue tracker or mailing list, it is a good idea to summarize the main points of that discussion in the change's description, and include a pointer to that discussion or the issue ID for those who'd like to read it in full. The best place to explain how parts of the new code work with other code is in comments in the code, not in the change log. If you think that a change calls for explanation of _why_ the change was needed--that is, what problem the old code had such that it required this change--you're probably right. Please put the explanation in comments in the code, where people will see it whenever they see the code. An example of such an explanation is, "This function used to be iterative, but that failed when MUMBLE was a tree." 
(Though such a simple reason would not need this kind of explanation.) The best place for other kinds of explanation of the change is in the change log entry. In particular, comments usually will not say why some code was deleted or moved to another place--that belongs to the description of the change which did that. Following the free-text description of the change, it is a good idea to give a list of names of the entities or definitions that you changed, according to the files they are in, and what was changed in each one. *Note Style of Change Logs::. If a project uses a modern VCS to keep the change log information, as described in *note Change Logs::, explicitly listing the files and functions that were changed is not strictly necessary, and in some cases (like identical mechanical changes in many places) even tedious. It is up to you to decide whether to allow your project's developers to omit the list of changed files and functions from the log entries, and whether to allow such omissions under some specific conditions. However, while making this decision, please consider the following benefits of providing the list of changed entities with each change: * Generation of useful 'ChangeLog' files from VCS logs becomes more difficult if the change log entries don't list the modified functions/macros, because VCS commands cannot reliably reproduce their names from the commit information alone. For example, when there is a change in the header part of a function definition, the heading of the diff hunk as shown in the VCS log commands will name the wrong function as being modified (usually, the function defined before the one being modified), so using those diffs to glean the names of the modified functions will produce inaccurate results. You will need to use specialized scripts, such as gnulib's 'vcs-to-changelog.py', mentioned below, to solve these difficulties, and make sure it supports the source languages used by your project. 
* While modern VCS commands, such as Git's 'git log -L' and 'git log -G', provide powerful means for finding changes that affected a certain function or macro or data structure (and thus might make 'ChangeLog' files unnecessary if you have the repository available), they can sometimes fail. For example, 'git log -L' doesn't support syntax of some programming languages out of the box. Mentioning the modified functions/macros explicitly allows finding the related changes simply and reliably. * Some VCS commands have difficulties or limitations when tracking changes across file moves or renames. Again, if the entities are mentioned explicitly, those difficulties can be overcome. * Users that review changes using the generated 'ChangeLog' files may not have the repository and the VCS commands available to them. Naming the modified entities alleviates that problem. For these reasons, providing lists of modified files and functions with each change makes the change logs more useful, and we therefore recommend to include them whenever possible and practical. It is also possible to generate the lists naming the modified entities by running a script. One such script is 'mklog.py' (written in Python 3); it is used by the 'GCC' project. Gnulib provides another variant of such a script, called 'vcs-to-changelog.py', part of the 'vcs-to-changelog' module. Note that these scripts currently support fewer programming languages than the manual commands provided by Emacs (*note Style of Change Logs::). Therefore, the above mentioned method of generating the 'ChangeLog' file from the VCS commit history, for instance via the 'gitlog-to-changelog' script, usually gives better results--provided that the contributors stick to providing good commit messages.  
File: standards.info, Node: Style of Change Logs, Next: Simple Changes, Prev: Change Log Concepts, Up: Change Logs 6.8.2 Style of Change Logs -------------------------- Here are some simple examples of change log entries, starting with the header line that says who made the change and when it was installed, followed by descriptions of specific changes. (These examples are drawn from Emacs.) Keep in mind that the line which shows the date of the change and the author's name and email address is needed only in a separate 'ChangeLog' file, not when the change logs are kept in a VCS. 2019-08-29 Noam Postavsky Handle completely undecoded input in term (Bug#29918) * lisp/term.el (term-emulate-terminal): Avoid errors if the whole decoded string is eight-bit characters. Don't attempt to save the string for next iteration in that case. * test/lisp/term-tests.el (term-decode-partial) (term-undecodable-input): New tests. 2019-06-15 Paul Eggert Port to platforms where tputs is in libtinfow * configure.ac (tputs_library): Also try tinfow, ncursesw (Bug#33977). 2019-02-08 Eli Zaretskii Improve documentation of 'date-to-time' and 'parse-time-string' * doc/lispref/os.texi (Time Parsing): Document 'parse-time-string', and refer to it for the description of the argument of 'date-to-time'. * lisp/calendar/time-date.el (date-to-time): Refer in the doc string to 'parse-time-string' for more information about the format of the DATE argument. (Bug#34303) If you mention the names of the modified functions or variables, it's important to name them in full. Don't abbreviate function or variable names, and don't combine them. Subsequent maintainers will often search for a function name to find all the change log entries that pertain to it; if you abbreviate the name, they won't find it when they search. 
For example, some people are tempted to abbreviate groups of function names by writing '* register.el ({insert,jump-to}-register)'; this is not a good idea, since searching for 'jump-to-register' or 'insert-register' would not find that entry. Separate unrelated change log entries with blank lines. Don't put blank lines between individual changes of an entry. You can omit the file name and the asterisk when successive individual changes are in the same file. Break long lists of function names by closing continued lines with ')', rather than ',', and opening the continuation with '('. This makes highlighting in Emacs work better. Here is an example: * src/keyboard.c (menu_bar_items, tool_bar_items) (Fexecute_extended_command): Deal with 'keymap' property. The easiest way to add an entry to 'ChangeLog' is with the Emacs command 'M-x add-change-log-entry', or its variant 'C-x 4 a' ('add-change-log-entry-other-window'). This automatically collects the name of the changed file and the changed function or variable, and formats a change log entry according to the conventions described above, leaving it up to you to describe the changes you made to that function or variable. When you install someone else's changes, put the contributor's name in the change log entry rather than in the text of the entry. In other words, write this: 2002-07-14 John Doe * sewing.c: Make it sew. rather than this: 2002-07-14 Usual Maintainer * sewing.c: Make it sew. Patch by jdoe@gnu.org. When committing someone else's changes into a VCS, use the VCS features to specify the author. For example, with Git, use 'git commit --author=AUTHOR'. As for the date, that should be the date you applied the change. (With a VCS, use the appropriate command-line switches, e.g., 'git commit --date=DATE'.) 
Modern VCS have commands to apply changes sent via email (e.g., Git has 'git am'); in that case the author of the changeset and the date it was made will be automatically gleaned from the email message and recorded in the repository. If the patches are prepared with suitable VCS commands, such as 'git format-patch', the email message body will also have the original author of the changeset, so resending or forwarding the message will not interfere with attributing the changes to their author. Thus, we recommend that you request your contributors to use commands such as 'git format-patch' to prepare the patches.  File: standards.info, Node: Simple Changes, Next: Conditional Changes, Prev: Style of Change Logs, Up: Change Logs 6.8.3 Simple Changes -------------------- Certain simple kinds of changes don't need much detail in the change log. If the description of the change is short enough, it can serve as its own header line: 2019-08-29 Eli Zaretskii * lisp/simple.el (kill-do-not-save-duplicates): Doc fix. (Bug#36827) When you change the calling sequence of a function in a simple fashion, and you change all the callers of the function to use the new calling sequence, there is no need to make individual entries for all the callers that you changed. Just write in the entry for the function being called, "All callers changed"--like this: * keyboard.c (Fcommand_execute): New arg SPECIAL. All callers changed. When you change just comments or doc strings, it is enough to write an entry for the file, without mentioning the functions. Just "Doc fixes" is enough for the change log. When you make changes in many files that follow mechanically from one underlying change, it is enough to describe the underlying change. Here's an example of a change that affects all of the files in the repository: 2019-01-07 Paul Eggert Update copyright year to 2019 Run 'TZ=UTC0 admin/update-copyright $(git ls-files)'. 
Test suite files are part of the software, so we recommend treating them as code for change-log purposes. There's no technical need to make change log entries for non-software files (manuals, help files, media files, etc.). This is because they are not susceptible to bugs that are hard to understand. To correct an error, you need not know the history of the erroneous passage; it is enough to compare what the file says with the actual facts. However, you should keep change logs for non-software files when the project gets copyright assignments from its contributors, so as to make the records of authorship more accurate. For that reason, we recommend to keep change logs for Texinfo sources of your project's manuals.  File: standards.info, Node: Conditional Changes, Next: Indicating the Part Changed, Prev: Simple Changes, Up: Change Logs 6.8.4 Conditional Changes ------------------------- Source files can often contain code that is conditional to build-time or static conditions. For example, C programs can contain compile-time '#if' conditionals; programs implemented in interpreted languages can contain module imports of function definitions that are only performed for certain versions of the interpreter; and Automake 'Makefile.am' files can contain variable definitions or target declarations that are only to be considered if a configure-time Automake conditional is true. Many changes are conditional as well: sometimes you add a new variable, or function, or even a new program or library, which is entirely dependent on a build-time condition. It is useful to indicate in the change log the conditions for which a change applies. Our convention for indicating conditional changes is to use _square brackets around the name of the condition_. Conditional changes can happen in numerous scenarios and with many variations, so here are some examples to help clarify. 
This first example describes changes in C, Perl, and Python files which are conditional but do not have an associated function or entity name: * xterm.c [SOLARIS2]: Include . * FilePath.pm [$^O eq 'VMS']: Import the VMS::Feature module. * framework.py [sys.version_info < (2, 6)]: Make "with" statement available by importing it from __future__, to support also python 2.5. Our other examples will for simplicity be limited to C, as the minor changes necessary to adapt them to other languages should be self-evident. Next, here is an entry describing a new definition which is entirely conditional: the C macro 'FRAME_WINDOW_P' is defined (and used) only when the macro 'HAVE_X_WINDOWS' is defined: * frame.h [HAVE_X_WINDOWS] (FRAME_WINDOW_P): Macro defined. Next, an entry for a change within the function 'init_display', whose definition as a whole is unconditional, but the changes themselves are contained in a '#ifdef HAVE_LIBNCURSES' conditional: * dispnew.c (init_display) [HAVE_LIBNCURSES]: If X, call tgetent. Finally, here is an entry for a change that takes effect only when a certain macro is _not_ defined: * host.c (gethostname) [!HAVE_SOCKETS]: Replace with winsock version.  File: standards.info, Node: Indicating the Part Changed, Prev: Conditional Changes, Up: Change Logs 6.8.5 Indicating the Part Changed --------------------------------- Indicate the part of a function which changed by using angle brackets enclosing an indication of what the changed part does. Here is an entry for a change in the part of the function 'sh-while-getopts' that deals with 'sh' commands: * progmodes/sh-script.el (sh-while-getopts) : Handle case that user-specified option string is empty.  File: standards.info, Node: Man Pages, Next: Reading other Manuals, Prev: Change Logs, Up: Documentation 6.9 Man Pages ============= In the GNU project, man pages are secondary. It is not necessary or expected for every GNU program to have a man page, but some of them do. 
It's your choice whether to include a man page in your program. When you make this decision, consider that supporting a man page requires continual effort each time the program is changed. The time you spend on the man page is time taken away from more useful work. For a simple program which changes little, updating the man page may be a small job. Then there is little reason not to include a man page, if you have one. For a large program that changes a great deal, updating a man page may be a substantial burden. If a user offers to donate a man page, you may find this gift costly to accept. It may be better to refuse the man page unless the same person agrees to take full responsibility for maintaining it--so that you can wash your hands of it entirely. If this volunteer later ceases to do the job, then don't feel obliged to pick it up yourself; it may be better to withdraw the man page from the distribution until someone else agrees to update it. When a program changes only a little, you may feel that the discrepancies are small enough that the man page remains useful without updating. If so, put a prominent note near the beginning of the man page stating that you don't maintain it and that the Texinfo manual is more authoritative. The note should say how to access the Texinfo documentation. Be sure that man pages include a copyright statement and free license. The simple all-permissive license is appropriate for simple man pages (*note (maintain)License Notices for Other Files::). For long man pages, with enough explanation and documentation that they can be considered true manuals, use the GFDL (*note License for Manuals::). Finally, the GNU help2man program (<https://www.gnu.org/software/help2man/>) is one way to automate generation of a man page, in this case from '--help' output. This is sufficient in many cases.  
File: standards.info, Node: Reading other Manuals, Prev: Man Pages, Up: Documentation 6.10 Reading other Manuals ========================== There may be non-free books or documentation files that describe the program you are documenting. It is ok to use these documents for reference, just as the author of a new algebra textbook can read other books on algebra. A large portion of any non-fiction book consists of facts, in this case facts about how a certain program works, and these facts are necessarily the same for everyone who writes about the subject. But be careful not to copy your outline structure, wording, tables or examples from preexisting non-free documentation. Copying from free documentation may be ok; please check with the FSF about the individual case.  File: standards.info, Node: Managing Releases, Next: References, Prev: Documentation, Up: Top 7 The Release Process ********************* Making a release is more than just bundling up your source files in a tar file and putting it up for FTP. You should set up your software so that it can be configured to run on a variety of systems. Your Makefile should conform to the GNU standards described below, and your directory layout should also conform to the standards discussed below. Doing so makes it easy to include your package into the larger framework of all GNU software. * Menu: * Configuration:: How configuration of GNU packages should work. * Makefile Conventions:: Makefile conventions. * Releases:: Making releases  File: standards.info, Node: Configuration, Next: Makefile Conventions, Up: Managing Releases 7.1 How Configuration Should Work ================================= Each GNU distribution should come with a shell script named 'configure'. This script is given arguments which describe the kind of machine and system you want to compile the program for. The 'configure' script must record the configuration options so that they affect compilation. 
The description here is the specification of the interface for the 'configure' script in GNU packages. Many packages implement it using GNU Autoconf (*note Introduction: (autoconf)Top.) and/or GNU Automake (*note Introduction: (automake)Top.), but you do not have to use these tools. You can implement it any way you like; for instance, by making 'configure' be a wrapper around a completely different configuration system. Another way for the 'configure' script to operate is to make a link from a standard name such as 'config.h' to the proper configuration file for the chosen system. If you use this technique, the distribution should _not_ contain a file named 'config.h'. This is so that people won't be able to build the program without configuring it first. Another thing that 'configure' can do is to edit the Makefile. If you do this, the distribution should _not_ contain a file named 'Makefile'. Instead, it should include a file 'Makefile.in' which contains the input used for editing. Once again, this is so that people won't be able to build the program without configuring it first. If 'configure' does write the 'Makefile', then 'Makefile' should have a target named 'Makefile' which causes 'configure' to be rerun, setting up the same configuration that was set up last time. The files that 'configure' reads should be listed as dependencies of 'Makefile'. All the files which are output from the 'configure' script should have comments at the beginning stating that they were generated automatically using 'configure'. This is so that users won't think of trying to edit them by hand. The 'configure' script should write a file named 'config.status' which describes which configuration options were specified when the program was last configured. This file should be a shell script which, if run, will recreate the same configuration. 
The 'configure' script should accept an option of the form '--srcdir=DIRNAME' to specify the directory where sources are found (if it is not the current directory). This makes it possible to build the program in a separate directory, so that the actual source directory is not modified. If the user does not specify '--srcdir', then 'configure' should check both '.' and '..' to see if it can find the sources. If it finds the sources in one of these places, it should use them from there. Otherwise, it should report that it cannot find the sources, and should exit with nonzero status. Usually the easy way to support '--srcdir' is by editing a definition of 'VPATH' into the Makefile. Some rules may need to refer explicitly to the specified source directory. To make this possible, 'configure' can add to the Makefile a variable named 'srcdir' whose value is precisely the specified directory. In addition, the 'configure' script should take options corresponding to most of the standard directory variables (*note Directory Variables::). Here is the list: --prefix --exec-prefix --bindir --sbindir --libexecdir --sysconfdir --sharedstatedir --localstatedir --runstatedir --libdir --includedir --oldincludedir --datarootdir --datadir --infodir --localedir --mandir --docdir --htmldir --dvidir --pdfdir --psdir The 'configure' script should also take an argument which specifies the type of system to build the program for. This argument should look like this: CPU-COMPANY-SYSTEM For example, an Athlon-based GNU/Linux system might be 'i686-pc-linux-gnu'. The 'configure' script needs to be able to decode all plausible alternatives for how to describe a machine. Thus, 'athlon-pc-gnu/linux' would be a valid alias. There is a shell script called 'config.sub' (https://git.savannah.gnu.org/cgit/config.git/plain/config.sub) that you can use as a subroutine to validate system types and canonicalize aliases. 
The 'configure' script should also take the option '--build=BUILDTYPE', which should be equivalent to a plain BUILDTYPE argument. For example, 'configure --build=i686-pc-linux-gnu' is equivalent to 'configure i686-pc-linux-gnu'. When the build type is not specified by an option or argument, the 'configure' script should normally guess it using the shell script 'config.guess' (https://git.savannah.gnu.org/cgit/config.git/plain/config.guess). Other options are permitted to specify in more detail the software or hardware present on the machine, to include or exclude optional parts of the package, or to adjust the name of some tools or arguments to them: '--enable-FEATURE[=PARAMETER]' Configure the package to build and install an optional user-level facility called FEATURE. This allows users to choose which optional features to include. Giving an optional PARAMETER of 'no' should omit FEATURE, if it is built by default. No '--enable' option should *ever* cause one feature to replace another. No '--enable' option should ever substitute one useful behavior for another useful behavior. The only proper use for '--enable' is for questions of whether to build part of the program or exclude it. '--with-PACKAGE' The package PACKAGE will be installed, so configure this package to work with PACKAGE. Possible values of PACKAGE include 'gnu-as' (or 'gas'), 'gnu-ld', 'gnu-libc', 'gdb', 'x', and 'x-toolkit'. Do not use a '--with' option to specify the file name to use to find certain files. That is outside the scope of what '--with' options are for. 'VARIABLE=VALUE' Set the value of the variable VARIABLE to VALUE. This is used to override the default values of commands or arguments in the build process. For example, the user could issue 'configure CFLAGS=-g CXXFLAGS=-g' to build with debugging information and without the default optimization. 
Specifying variables as arguments to 'configure', like this: ./configure CC=gcc is preferable to setting them in environment variables: CC=gcc ./configure as it helps to recreate the same configuration later with 'config.status'. However, both methods should be supported. All 'configure' scripts should accept all of the "detail" options and the variable settings, whether or not they make any difference to the particular package at hand. In particular, they should accept any option that starts with '--with-' or '--enable-'. This is so users will be able to configure an entire GNU source tree at once with a single set of options. You will note that the categories '--with-' and '--enable-' are narrow: they *do not* provide a place for any sort of option you might think of. That is deliberate. We want to limit the possible configuration options in GNU software. We do not want GNU programs to have idiosyncratic configuration options. Packages that perform part of the compilation process may support cross-compilation. In such a case, the host and target machines for the program may be different. The 'configure' script should normally treat the specified type of system as both the host and the target, thus producing a program which works for the same type of machine that it runs on. To compile a program to run on a host type that differs from the build type, use the configure option '--host=HOSTTYPE', where HOSTTYPE uses the same syntax as BUILDTYPE. The host type normally defaults to the build type. To configure a cross-compiler, cross-assembler, or what have you, you should specify a target different from the host, using the configure option '--target=TARGETTYPE'. The syntax for TARGETTYPE is the same as for the host type. So the command would look like this: ./configure --host=HOSTTYPE --target=TARGETTYPE The target type normally defaults to the host type. 
Programs for which cross-operation is not meaningful need not accept the '--target' option, because configuring an entire operating system for cross-operation is not a meaningful operation. Some programs have ways of configuring themselves automatically. If your program is set up to do this, your 'configure' script can simply ignore most of its arguments.  File: standards.info, Node: Makefile Conventions, Next: Releases, Prev: Configuration, Up: Managing Releases 7.2 Makefile Conventions ======================== This node describes conventions for writing the Makefiles for GNU programs. Using Automake will help you write a Makefile that follows these conventions. For more information on portable Makefiles, see POSIX and *note Portable Make Programming: (autoconf)Portable Make. * Menu: * Makefile Basics:: General conventions for Makefiles. * Utilities in Makefiles:: Utilities to be used in Makefiles. * Command Variables:: Variables for specifying commands. * DESTDIR:: Supporting staged installs. * Directory Variables:: Variables for installation directories. * Standard Targets:: Standard targets for users. * Install Command Categories:: Three categories of commands in the 'install' rule: normal, pre-install and post-install.  File: standards.info, Node: Makefile Basics, Next: Utilities in Makefiles, Up: Makefile Conventions 7.2.1 General Conventions for Makefiles --------------------------------------- Every Makefile should contain this line: SHELL = /bin/sh to avoid trouble on systems where the 'SHELL' variable might be inherited from the environment. (This is never a problem with GNU 'make'.) Different 'make' programs have incompatible suffix lists and implicit rules, and this sometimes creates confusion or misbehavior. 
So it is a good idea to set the suffix list explicitly using only the suffixes you need in the particular Makefile, like this: .SUFFIXES: .SUFFIXES: .c .o The first line clears out the suffix list, the second introduces all suffixes which may be subject to implicit rules in this Makefile. Don't assume that '.' is in the path for command execution. When you need to run programs that are a part of your package during the make, please make sure that it uses './' if the program is built as part of the make or '$(srcdir)/' if the file is an unchanging part of the source code. Without one of these prefixes, the current search path is used. The distinction between './' (the "build directory") and '$(srcdir)/' (the "source directory") is important because users can build in a separate directory using the '--srcdir' option to 'configure'. A rule of the form: foo.1 : foo.man sedscript sed -f sedscript foo.man > foo.1 will fail when the build directory is not the source directory, because 'foo.man' and 'sedscript' are in the source directory. When using GNU 'make', relying on 'VPATH' to find the source file will work in the case where there is a single dependency file, since the 'make' automatic variable '$<' will represent the source file wherever it is. (Many versions of 'make' set '$<' only in implicit rules.) A Makefile target like foo.o : bar.c $(CC) -I. -I$(srcdir) $(CFLAGS) -c bar.c -o foo.o should instead be written as foo.o : bar.c $(CC) -I. -I$(srcdir) $(CFLAGS) -c $< -o $@ in order to allow 'VPATH' to work correctly. When the target has multiple dependencies, using an explicit '$(srcdir)' is the easiest way to make the rule work well. For example, the target above for 'foo.1' is best written as: foo.1 : foo.man sedscript sed -f $(srcdir)/sedscript $(srcdir)/foo.man > $@ GNU distributions usually contain some files which are not source files--for example, Info files, and the output from Autoconf, Automake, Bison or Flex. 
Since these files normally appear in the source directory, they should always appear in the source directory, not in the build directory. So Makefile rules to update them should put the updated files in the source directory. However, if a file does not appear in the distribution, then the Makefile should not put it in the source directory, because building a program in ordinary circumstances should not modify the source directory in any way. Try to make the build and installation targets, at least (and all their subtargets) work correctly with a parallel 'make'.  File: standards.info, Node: Utilities in Makefiles, Next: Command Variables, Prev: Makefile Basics, Up: Makefile Conventions 7.2.2 Utilities in Makefiles ---------------------------- Write the Makefile commands (and any shell scripts, such as 'configure') to run under 'sh' (both the traditional Bourne shell and the POSIX shell), not 'csh'. Don't use any special features of 'ksh' or 'bash', or POSIX features not widely supported in traditional Bourne 'sh'. The 'configure' script and the Makefile rules for building and installation should not use any utilities directly except these: awk cat cmp cp diff echo egrep expr false grep install-info ln ls mkdir mv printf pwd rm rmdir sed sleep sort tar test touch tr true Compression programs such as 'gzip' can be used in the 'dist' rule. Generally, stick to the widely-supported (usually POSIX-specified) options and features of these programs. For example, don't use 'mkdir -p', convenient as it may be, because a few systems don't support it at all and with others, it is not safe for parallel execution. For a list of known incompatibilities, see *note Portable Shell Programming: (autoconf)Portable Shell. It is a good idea to avoid creating symbolic links in makefiles, since a few file systems don't support them. 
The Makefile rules for building and installation can also use compilers and related programs, but should do so via 'make' variables so that the user can substitute alternatives. Here are some of the programs we mean: ar bison cc flex install ld ldconfig lex make makeinfo ranlib texi2dvi yacc Use the following 'make' variables to run those programs: $(AR) $(BISON) $(CC) $(FLEX) $(INSTALL) $(LD) $(LDCONFIG) $(LEX) $(MAKE) $(MAKEINFO) $(RANLIB) $(TEXI2DVI) $(YACC) When you use 'ranlib' or 'ldconfig', you should make sure nothing bad happens if the system does not have the program in question. Arrange to ignore an error from that command, and print a message before the command to tell the user that failure of this command does not mean a problem. (The Autoconf 'AC_PROG_RANLIB' macro can help with this.) If you use symbolic links, you should implement a fallback for systems that don't have symbolic links. Additional utilities that can be used via Make variables are: chgrp chmod chown mknod It is ok to use other utilities in Makefile portions (or scripts) intended only for particular systems where you know those utilities exist.  File: standards.info, Node: Command Variables, Next: DESTDIR, Prev: Utilities in Makefiles, Up: Makefile Conventions 7.2.3 Variables for Specifying Commands --------------------------------------- Makefiles should provide variables for overriding certain commands, options, and so on. In particular, you should run most utility programs via variables. Thus, if you use Bison, have a variable named 'BISON' whose default value is set with 'BISON = bison', and refer to it with '$(BISON)' whenever you need to use Bison. File management utilities such as 'ln', 'rm', 'mv', and so on, need not be referred to through variables in this way, since users don't need to replace them with other programs. Each program-name variable should come with an options variable that is used to supply options to the program. 
Append 'FLAGS' to the program-name variable name to get the options variable name--for example, 'BISONFLAGS'. (The names 'CFLAGS' for the C compiler, 'YFLAGS' for yacc, and 'LFLAGS' for lex, are exceptions to this rule, but we keep them because they are standard.) Use 'CPPFLAGS' in any compilation command that runs the preprocessor, and use 'LDFLAGS' in any compilation command that does linking as well as in any direct use of 'ld'. If there are C compiler options that _must_ be used for proper compilation of certain files, do not include them in 'CFLAGS'. Users expect to be able to specify 'CFLAGS' freely themselves. Instead, arrange to pass the necessary options to the C compiler independently of 'CFLAGS', by writing them explicitly in the compilation commands or by defining an implicit rule, like this: CFLAGS = -g ALL_CFLAGS = -I. $(CFLAGS) .c.o: $(CC) -c $(CPPFLAGS) $(ALL_CFLAGS) $< Do include the '-g' option in 'CFLAGS', because that is not _required_ for proper compilation. You can consider it a default that is only recommended. If the package is set up so that it is compiled with GCC by default, then you might as well include '-O' in the default value of 'CFLAGS' as well. Put 'CFLAGS' last in the compilation command, after other variables containing compiler options, so the user can use 'CFLAGS' to override the others. 'CFLAGS' should be used in every invocation of the C compiler, both those which do compilation and those which do linking. Every Makefile should define the variable 'INSTALL', which is the basic command for installing a file into the system. Every Makefile should also define the variables 'INSTALL_PROGRAM' and 'INSTALL_DATA'. (The default for 'INSTALL_PROGRAM' should be '$(INSTALL)'; the default for 'INSTALL_DATA' should be '${INSTALL} -m 644'.) Then it should use those variables as the commands for actual installation, for executables and non-executables respectively. 
Minimal use of these variables is as follows: $(INSTALL_PROGRAM) foo $(bindir)/foo $(INSTALL_DATA) libfoo.a $(libdir)/libfoo.a However, it is preferable to support a 'DESTDIR' prefix on the target files, as explained in the next section. It is acceptable, but not required, to install multiple files in one command, with the final argument being a directory, as in: $(INSTALL_PROGRAM) foo bar baz $(bindir)  File: standards.info, Node: DESTDIR, Next: Directory Variables, Prev: Command Variables, Up: Makefile Conventions 7.2.4 'DESTDIR': Support for Staged Installs -------------------------------------------- 'DESTDIR' is a variable prepended to each installed target file, like this: $(INSTALL_PROGRAM) foo $(DESTDIR)$(bindir)/foo $(INSTALL_DATA) libfoo.a $(DESTDIR)$(libdir)/libfoo.a The 'DESTDIR' variable is specified by the user on the 'make' command line as an absolute file name. For example: make DESTDIR=/tmp/stage install 'DESTDIR' should be supported only in the 'install*' and 'uninstall*' targets, as those are the only targets where it is useful. If your installation step would normally install '/usr/local/bin/foo' and '/usr/local/lib/libfoo.a', then an installation invoked as in the example above would install '/tmp/stage/usr/local/bin/foo' and '/tmp/stage/usr/local/lib/libfoo.a' instead. Prepending the variable 'DESTDIR' to each target in this way provides for "staged installs", where the installed files are not placed directly into their expected location but are instead copied into a temporary location ('DESTDIR'). However, installed files maintain their relative directory structure and any embedded file names will not be modified. You should not set the value of 'DESTDIR' in your 'Makefile' at all; then the files are installed into their expected locations by default. Also, specifying 'DESTDIR' should not change the operation of the software in any way, so its value should not be included in any file contents. 
'DESTDIR' support is commonly used in package creation. It is also helpful to users who want to understand what a given package will install where, and to allow users who don't normally have permissions to install into protected areas to build and install before gaining those permissions. Finally, it can be useful with tools such as 'stow', where code is installed in one place but made to appear to be installed somewhere else using symbolic links or special mount operations. So, we strongly recommend GNU packages support 'DESTDIR', though it is not an absolute requirement.  File: standards.info, Node: Directory Variables, Next: Standard Targets, Prev: DESTDIR, Up: Makefile Conventions 7.2.5 Variables for Installation Directories -------------------------------------------- Installation directories should always be named by variables, so it is easy to install in a nonstandard place. The standard names for these variables and the values they should have in GNU packages are described below. They are based on a standard file system layout; variants of it are used in GNU/Linux and other modern operating systems. Installers are expected to override these values when calling 'make' (e.g., 'make prefix=/usr install') or 'configure' (e.g., 'configure --prefix=/usr'). GNU packages should not try to guess which value should be appropriate for these variables on the system they are being installed onto: use the default settings specified here so that all GNU packages behave identically, allowing the installer to achieve any desired layout. All installation directories, and their parent directories, should be created (if necessary) before they are installed into. These first two variables set the root for the installation. All the other installation directories should be subdirectories of one of these two, and nothing should be directly installed into these two directories. 'prefix' A prefix used in constructing the default values of the variables listed below. 
The default value of 'prefix' should be '/usr/local'. When building the complete GNU system, the prefix will be empty and '/usr' will be a symbolic link to '/'. (If you are using Autoconf, write it as '@prefix@'.) Running 'make install' with a different value of 'prefix' from the one used to build the program should _not_ recompile the program. 'exec_prefix' A prefix used in constructing the default values of some of the variables listed below. The default value of 'exec_prefix' should be '$(prefix)'. (If you are using Autoconf, write it as '@exec_prefix@'.) Generally, '$(exec_prefix)' is used for directories that contain machine-specific files (such as executables and subroutine libraries), while '$(prefix)' is used directly for other directories. Running 'make install' with a different value of 'exec_prefix' from the one used to build the program should _not_ recompile the program. Executable programs are installed in one of the following directories. 'bindir' The directory for installing executable programs that users can run. This should normally be '/usr/local/bin', but write it as '$(exec_prefix)/bin'. (If you are using Autoconf, write it as '@bindir@'.) 'sbindir' The directory for installing executable programs that can be run from the shell, but are only generally useful to system administrators. This should normally be '/usr/local/sbin', but write it as '$(exec_prefix)/sbin'. (If you are using Autoconf, write it as '@sbindir@'.) 'libexecdir' The directory for installing executable programs to be run by other programs rather than by users. This directory should normally be '/usr/local/libexec', but write it as '$(exec_prefix)/libexec'. (If you are using Autoconf, write it as '@libexecdir@'.) The definition of 'libexecdir' is the same for all packages, so you should install your data in a subdirectory thereof. 
Most packages install their data under '$(libexecdir)/PACKAGE-NAME/', possibly within additional subdirectories thereof, such as '$(libexecdir)/PACKAGE-NAME/MACHINE/VERSION'. Data files used by the program during its execution are divided into categories in two ways. * Some files are normally modified by programs; others are never normally modified (though users may edit some of these). * Some files are architecture-independent and can be shared by all machines at a site; some are architecture-dependent and can be shared only by machines of the same kind and operating system; others may never be shared between two machines. This makes for six different possibilities. However, we want to discourage the use of architecture-dependent files, aside from object files and libraries. It is much cleaner to make other data files architecture-independent, and it is generally not hard. Here are the variables Makefiles should use to specify directories to put these various kinds of files in: 'datarootdir' The root of the directory tree for read-only architecture-independent data files. This should normally be '/usr/local/share', but write it as '$(prefix)/share'. (If you are using Autoconf, write it as '@datarootdir@'.) 'datadir''s default value is based on this variable; so are 'infodir', 'mandir', and others. 'datadir' The directory for installing idiosyncratic read-only architecture-independent data files for this program. This is usually the same place as 'datarootdir', but we use the two separate variables so that you can move these program-specific files without altering the location for Info files, man pages, etc. This should normally be '/usr/local/share', but write it as '$(datarootdir)'. (If you are using Autoconf, write it as '@datadir@'.) The definition of 'datadir' is the same for all packages, so you should install your data in a subdirectory thereof. Most packages install their data under '$(datadir)/PACKAGE-NAME/'. 
'sysconfdir' The directory for installing read-only data files that pertain to a single machine--that is to say, files for configuring a host. Mailer and network configuration files, '/etc/passwd', and so forth belong here. All the files in this directory should be ordinary ASCII text files. This directory should normally be '/usr/local/etc', but write it as '$(prefix)/etc'. (If you are using Autoconf, write it as '@sysconfdir@'.) Do not install executables here in this directory (they probably belong in '$(libexecdir)' or '$(sbindir)'). Also do not install files that are modified in the normal course of their use (programs whose purpose is to change the configuration of the system excluded). Those probably belong in '$(localstatedir)'. 'sharedstatedir' The directory for installing architecture-independent data files which the programs modify while they run. This should normally be '/usr/local/com', but write it as '$(prefix)/com'. (If you are using Autoconf, write it as '@sharedstatedir@'.) 'localstatedir' The directory for installing data files which the programs modify while they run, and that pertain to one specific machine. Users should never need to modify files in this directory to configure the package's operation; put such configuration information in separate files that go in '$(datadir)' or '$(sysconfdir)'. '$(localstatedir)' should normally be '/usr/local/var', but write it as '$(prefix)/var'. (If you are using Autoconf, write it as '@localstatedir@'.) 'runstatedir' The directory for installing data files which the programs modify while they run, that pertain to one specific machine, and which need not persist longer than the execution of the program--which is generally long-lived, for example, until the next reboot. PID files for system daemons are a typical use. In addition, this directory should not be cleaned except perhaps at reboot, while the general '/tmp' ('TMPDIR') may be cleaned arbitrarily. 
This should normally be '/var/run', but write it as '$(localstatedir)/run'. Having it as a separate variable allows the use of '/run' if desired, for example. (If you are using Autoconf 2.70 or later, write it as '@runstatedir@'.) These variables specify the directory for installing certain specific types of files, if your program has them. Every GNU package should have Info files, so every program needs 'infodir', but not all need 'libdir' or 'lispdir'. 'includedir' The directory for installing header files to be included by user programs with the C '#include' preprocessor directive. This should normally be '/usr/local/include', but write it as '$(prefix)/include'. (If you are using Autoconf, write it as '@includedir@'.) Most compilers other than GCC do not look for header files in directory '/usr/local/include'. So installing the header files this way is only useful with GCC. Sometimes this is not a problem because some libraries are only really intended to work with GCC. But some libraries are intended to work with other compilers. They should install their header files in two places, one specified by 'includedir' and one specified by 'oldincludedir'. 'oldincludedir' The directory for installing '#include' header files for use with compilers other than GCC. This should normally be '/usr/include'. (If you are using Autoconf, you can write it as '@oldincludedir@'.) The Makefile commands should check whether the value of 'oldincludedir' is empty. If it is, they should not try to use it; they should cancel the second installation of the header files. A package should not replace an existing header in this directory unless the header came from the same package. Thus, if your Foo package provides a header file 'foo.h', then it should install the header file in the 'oldincludedir' directory if either (1) there is no 'foo.h' there or (2) the 'foo.h' that exists came from the Foo package. 
To tell whether 'foo.h' came from the Foo package, put a magic string in the file--part of a comment--and 'grep' for that string. 'docdir' The directory for installing documentation files (other than Info) for this package. By default, it should be '/usr/local/share/doc/YOURPKG', but it should be written as '$(datarootdir)/doc/YOURPKG'. (If you are using Autoconf, write it as '@docdir@'.) The YOURPKG subdirectory, which may include a version number, prevents collisions among files with common names, such as 'README'. 'infodir' The directory for installing the Info files for this package. By default, it should be '/usr/local/share/info', but it should be written as '$(datarootdir)/info'. (If you are using Autoconf, write it as '@infodir@'.) 'infodir' is separate from 'docdir' for compatibility with existing practice. 'htmldir' 'dvidir' 'pdfdir' 'psdir' Directories for installing documentation files in the particular format. They should all be set to '$(docdir)' by default. (If you are using Autoconf, write them as '@htmldir@', '@dvidir@', etc.) Packages which supply several translations of their documentation should install them in '$(htmldir)/'LL, '$(pdfdir)/'LL, etc. where LL is a locale abbreviation such as 'en' or 'pt_BR'. 'libdir' The directory for object files and libraries of object code. Do not install executables here, they probably ought to go in '$(libexecdir)' instead. The value of 'libdir' should normally be '/usr/local/lib', but write it as '$(exec_prefix)/lib'. (If you are using Autoconf, write it as '@libdir@'.) 'lispdir' The directory for installing any Emacs Lisp files in this package. By default, it should be '/usr/local/share/emacs/site-lisp', but it should be written as '$(datarootdir)/emacs/site-lisp'. If you are using Autoconf, write the default as '@lispdir@'. 
In order to make '@lispdir@' work, you need the following lines in your 'configure.ac' file: lispdir='${datarootdir}/emacs/site-lisp' AC_SUBST(lispdir) 'localedir' The directory for installing locale-specific message catalogs for this package. By default, it should be '/usr/local/share/locale', but it should be written as '$(datarootdir)/locale'. (If you are using Autoconf, write it as '@localedir@'.) This directory usually has a subdirectory per locale. Unix-style man pages are installed in one of the following: 'mandir' The top-level directory for installing the man pages (if any) for this package. It will normally be '/usr/local/share/man', but you should write it as '$(datarootdir)/man'. (If you are using Autoconf, write it as '@mandir@'.) 'man1dir' The directory for installing section 1 man pages. Write it as '$(mandir)/man1'. 'man2dir' The directory for installing section 2 man pages. Write it as '$(mandir)/man2'. '...' *Don't make the primary documentation for any GNU software be a man page. Write a manual in Texinfo instead. Man pages are just for the sake of people running GNU software on Unix, which is a secondary application only.* 'manext' The file name extension for the installed man page. This should contain a period followed by the appropriate digit; it should normally be '.1'. 'man1ext' The file name extension for installed section 1 man pages. 'man2ext' The file name extension for installed section 2 man pages. '...' Use these names instead of 'manext' if the package needs to install man pages in more than one section of the manual. And finally, you should set the following variable: 'srcdir' The directory for the sources being compiled. The value of this variable is normally inserted by the 'configure' shell script. (If you are using Autoconf, use 'srcdir = @srcdir@'.) For example: # Common prefix for installation directories. # NOTE: This directory must exist when you start the install. 
prefix = /usr/local datarootdir = $(prefix)/share datadir = $(datarootdir) exec_prefix = $(prefix) # Where to put the executable for the command 'gcc'. bindir = $(exec_prefix)/bin # Where to put the directories used by the compiler. libexecdir = $(exec_prefix)/libexec # Where to put the Info files. infodir = $(datarootdir)/info If your program installs a large number of files into one of the standard user-specified directories, it might be useful to group them into a subdirectory particular to that program. If you do this, you should write the 'install' rule to create these subdirectories. Do not expect the user to include the subdirectory name in the value of any of the variables listed above. The idea of having a uniform set of variable names for installation directories is to enable the user to specify the exact same values for several different GNU packages. In order for this to be useful, all the packages must be designed so that they will work sensibly when the user does so. At times, not all of these variables may be implemented in the current release of Autoconf and/or Automake; but as of Autoconf 2.60, we believe all of them are. When any are missing, the descriptions here serve as specifications for what Autoconf will implement. As a programmer, you can either use a development version of Autoconf or avoid using these variables until a stable release is made which supports them.  File: standards.info, Node: Standard Targets, Next: Install Command Categories, Prev: Directory Variables, Up: Makefile Conventions 7.2.6 Standard Targets for Users -------------------------------- All GNU programs should have the following targets in their Makefiles: 'all' Compile the entire program. This should be the default target. This target need not rebuild any documentation files; Info files should normally be included in the distribution, and DVI (and other documentation format) files should be made only when explicitly asked for. 
By default, the Make rules should compile and link with '-g', so that executable programs have debugging symbols. Otherwise, you are essentially helpless in the face of a crash, and it is often far from easy to reproduce with a fresh build. 'install' Compile the program and copy the executables, libraries, and so on to the file names where they should reside for actual use. If there is a simple test to verify that a program is properly installed, this target should run that test. Do not strip executables when installing them. This helps eventual debugging that may be needed later, and nowadays disk space is cheap and dynamic loaders typically ensure debug sections are not loaded during normal execution. Users that need stripped binaries may invoke the 'install-strip' target to do that. If possible, write the 'install' target rule so that it does not modify anything in the directory where the program was built, provided 'make all' has just been done. This is convenient for building the program under one user name and installing it under another. The commands should create all the directories in which files are to be installed, if they don't already exist. This includes the directories specified as the values of the variables 'prefix' and 'exec_prefix', as well as all subdirectories that are needed. One way to do this is by means of an 'installdirs' target as described below. Use '-' before any command for installing a man page, so that 'make' will ignore any errors. This is in case there are systems that don't have the Unix man page documentation system installed. The way to install Info files is to copy them into '$(infodir)' with '$(INSTALL_DATA)' (*note Command Variables::), and then run the 'install-info' program if it is present. 'install-info' is a program that edits the Info 'dir' file to add or update the menu entry for the given Info file; it is part of the Texinfo package. 
Here is a sample rule to install an Info file that also tries to handle some additional situations, such as 'install-info' not being present. do-install-info: foo.info installdirs $(NORMAL_INSTALL) # Prefer an info file in . to one in srcdir. if test -f foo.info; then d=.; \ else d="$(srcdir)"; fi; \ $(INSTALL_DATA) $$d/foo.info \ "$(DESTDIR)$(infodir)/foo.info" # Run install-info only if it exists. # Use 'if' instead of just prepending '-' to the # line so we notice real errors from install-info. # Use '$(SHELL) -c' because some shells do not # fail gracefully when there is an unknown command. $(POST_INSTALL) if $(SHELL) -c 'install-info --version' \ >/dev/null 2>&1; then \ install-info --dir-file="$(DESTDIR)$(infodir)/dir" \ "$(DESTDIR)$(infodir)/foo.info"; \ else true; fi When writing the 'install' target, you must classify all the commands into three categories: normal ones, "pre-installation" commands and "post-installation" commands. *Note Install Command Categories::. 'install-html' 'install-dvi' 'install-pdf' 'install-ps' These targets install documentation in formats other than Info; they're intended to be called explicitly by the person installing the package, if that format is desired. GNU prefers Info files, so these must be installed by the 'install' target. When you have many documentation files to install, we recommend that you avoid collisions and clutter by arranging for these targets to install in subdirectories of the appropriate installation directory, such as 'htmldir'. As one example, if your package has multiple manuals, and you wish to install HTML documentation with many files (such as the "split" mode output by 'makeinfo --html'), you'll certainly want to use subdirectories, or two nodes with the same name in different manuals will overwrite each other. Please make these 'install-FORMAT' targets invoke the commands for the FORMAT target, for example, by making FORMAT a dependency. 
'uninstall' Delete all the installed files--the copies that the 'install' and 'install-*' targets create. This rule should not modify the directories where compilation is done, only the directories where files are installed. The uninstallation commands are divided into three categories, just like the installation commands. *Note Install Command Categories::. 'install-strip' Like 'install', but strip the executable files while installing them. In simple cases, this target can use the 'install' target in a simple way: install-strip: $(MAKE) INSTALL_PROGRAM='$(INSTALL_PROGRAM) -s' \ install But if the package installs scripts as well as real executables, the 'install-strip' target can't just refer to the 'install' target; it has to strip the executables but not the scripts. 'install-strip' should not strip the executables in the build directory which are being copied for installation. It should only strip the copies that are installed. Normally we do not recommend stripping an executable unless you are sure the program has no bugs. However, it can be reasonable to install a stripped executable for actual execution while saving the unstripped executable elsewhere in case there is a bug. 'clean' Delete all files in the current directory that are normally created by building the program. Also delete files in other directories if they are created by this makefile. However, don't delete the files that record the configuration. Also preserve files that could be made by building, but normally aren't because the distribution comes with them. There is no need to delete parent directories that were created with 'mkdir -p', since they could have existed anyway. Delete '.dvi' files here if they are not part of the distribution. 'distclean' Delete all files in the current directory (or created by this makefile) that are created by configuring or building the program. 
If you have unpacked the source and built the program without creating any other files, 'make distclean' should leave only the files that were in the distribution. However, there is no need to delete parent directories that were created with 'mkdir -p', since they could have existed anyway. 'mostlyclean' Like 'clean', but may refrain from deleting a few files that people normally don't want to recompile. For example, the 'mostlyclean' target for GCC does not delete 'libgcc.a', because recompiling it is rarely necessary and takes a lot of time. 'maintainer-clean' Delete almost everything that can be reconstructed with this Makefile. This typically includes everything deleted by 'distclean', plus more: C source files produced by Bison, tags tables, Info files, and so on. The reason we say "almost everything" is that running the command 'make maintainer-clean' should not delete 'configure' even if 'configure' can be remade using a rule in the Makefile. More generally, 'make maintainer-clean' should not delete anything that needs to exist in order to run 'configure' and then begin to build the program. Also, there is no need to delete parent directories that were created with 'mkdir -p', since they could have existed anyway. These are the only exceptions; 'maintainer-clean' should delete everything else that can be rebuilt. The 'maintainer-clean' target is intended to be used by a maintainer of the package, not by ordinary users. You may need special tools to reconstruct some of the files that 'make maintainer-clean' deletes. Since these files are normally included in the distribution, we don't take care to make them easy to reconstruct. If you find you need to unpack the full distribution again, don't blame us. To help make users aware of this, the commands for the special 'maintainer-clean' target should start with these two: @echo 'This command is intended for maintainers to use; it' @echo 'deletes files that may need special tools to rebuild.' 
'TAGS' Update a tags table for this program. 'info' Generate any Info files needed. The best way to write the rules is as follows: info: foo.info foo.info: foo.texi chap1.texi chap2.texi $(MAKEINFO) $(srcdir)/foo.texi You must define the variable 'MAKEINFO' in the Makefile. It should run the 'makeinfo' program, which is part of the Texinfo distribution. Normally a GNU distribution comes with Info files, and that means the Info files are present in the source directory. Therefore, the Make rule for an info file should update it in the source directory. When users build the package, ordinarily Make will not update the Info files because they will already be up to date. 'dvi' 'html' 'pdf' 'ps' Generate documentation files in the given format. These targets should always exist, but any or all can be a no-op if the given output format cannot be generated. These targets should not be dependencies of the 'all' target; the user must manually invoke them. Here's an example rule for generating DVI files from Texinfo: dvi: foo.dvi foo.dvi: foo.texi chap1.texi chap2.texi $(TEXI2DVI) $(srcdir)/foo.texi You must define the variable 'TEXI2DVI' in the Makefile. It should run the program 'texi2dvi', which is part of the Texinfo distribution. ('texi2dvi' uses TeX to do the real work of formatting. TeX is not distributed with Texinfo.) Alternatively, write only the dependencies, and allow GNU 'make' to provide the command. Here's another example, this one for generating HTML from Texinfo: html: foo.html foo.html: foo.texi chap1.texi chap2.texi $(TEXI2HTML) $(srcdir)/foo.texi Again, you would define the variable 'TEXI2HTML' in the Makefile; for example, it might run 'makeinfo --no-split --html' ('makeinfo' is part of the Texinfo distribution). 'dist' Create a distribution tar file for this program. The tar file should be set up so that the file names in the tar file start with a subdirectory name which is the name of the package it is a distribution for. 
This name can include the version number. For example, the distribution tar file of GCC version 1.40 unpacks into a subdirectory named 'gcc-1.40'. The easiest way to do this is to create a subdirectory appropriately named, use 'ln' or 'cp' to install the proper files in it, and then 'tar' that subdirectory. Compress the tar file with 'gzip'. For example, the actual distribution file for GCC version 1.40 is called 'gcc-1.40.tar.gz'. It is ok to support other free compression formats as well. The 'dist' target should explicitly depend on all non-source files that are in the distribution, to make sure they are up to date in the distribution. *Note Making Releases: Releases. 'check' Perform self-tests (if any). The user must build the program before running the tests, but need not install the program; you should write the self-tests so that they work when the program is built but not installed. The following targets are suggested as conventional names, for programs in which they are useful. 'installcheck' Perform installation tests (if any). The user must build and install the program before running the tests. You should not assume that '$(bindir)' is in the search path. 'installdirs' It's useful to add a target named 'installdirs' to create the directories where files are installed, and their parent directories. There is a script called 'mkinstalldirs' which is convenient for this; you can find it in the Gnulib package. You can use a rule like this: # Make sure all installation directories (e.g. $(bindir)) # actually exist by making them if necessary. installdirs: mkinstalldirs $(srcdir)/mkinstalldirs $(bindir) $(datadir) \ $(libdir) $(infodir) \ $(mandir) or, if you wish to support 'DESTDIR' (strongly encouraged), # Make sure all installation directories (e.g. $(bindir)) # actually exist by making them if necessary. 
installdirs: mkinstalldirs $(srcdir)/mkinstalldirs \ $(DESTDIR)$(bindir) $(DESTDIR)$(datadir) \ $(DESTDIR)$(libdir) $(DESTDIR)$(infodir) \ $(DESTDIR)$(mandir) This rule should not modify the directories where compilation is done. It should do nothing but create installation directories.  File: standards.info, Node: Install Command Categories, Prev: Standard Targets, Up: Makefile Conventions 7.2.7 Install Command Categories -------------------------------- When writing the 'install' target, you must classify all the commands into three categories: normal ones, "pre-installation" commands and "post-installation" commands. Normal commands move files into their proper places, and set their modes. They may not alter any files except the ones that come entirely from the package they belong to. Pre-installation and post-installation commands may alter other files; in particular, they can edit global configuration files or data bases. Pre-installation commands are typically executed before the normal commands, and post-installation commands are typically run after the normal commands. The most common use for a post-installation command is to run 'install-info'. This cannot be done with a normal command, since it alters a file (the Info directory) which does not come entirely and solely from the package being installed. It is a post-installation command because it needs to be done after the normal command which installs the package's Info files. Most programs don't need any pre-installation commands, but we have the feature just in case it is needed. To classify the commands in the 'install' rule into these three categories, insert "category lines" among them. A category line specifies the category for the commands that follow. A category line consists of a tab and a reference to a special Make variable, plus an optional comment at the end. There are three variables you can use, one for each category; the variable name specifies the category. 
Category lines are no-ops in ordinary execution because these three Make variables are normally undefined (and you _should not_ define them in the makefile). Here are the three possible category lines, each with a comment that explains what it means: $(PRE_INSTALL) # Pre-install commands follow. $(POST_INSTALL) # Post-install commands follow. $(NORMAL_INSTALL) # Normal commands follow. If you don't use a category line at the beginning of the 'install' rule, all the commands are classified as normal until the first category line. If you don't use any category lines, all the commands are classified as normal. These are the category lines for 'uninstall': $(PRE_UNINSTALL) # Pre-uninstall commands follow. $(POST_UNINSTALL) # Post-uninstall commands follow. $(NORMAL_UNINSTALL) # Normal commands follow. Typically, a pre-uninstall command would be used for deleting entries from the Info directory. If the 'install' or 'uninstall' target has any dependencies which act as subroutines of installation, then you should start _each_ dependency's commands with a category line, and start the main target's commands with a category line also. This way, you can ensure that each command is placed in the right category regardless of which of the dependencies actually run. Pre-installation and post-installation commands should not run any programs except for these: [ basename bash cat chgrp chmod chown cmp cp dd diff echo egrep expand expr false fgrep find getopt grep gunzip gzip hostname install install-info kill ldconfig ln ls md5sum mkdir mkfifo mknod mv printenv pwd rm rmdir sed sort tee test touch true uname xargs yes The reason for distinguishing the commands in this way is for the sake of making binary packages. Typically a binary package contains all the executables and other files that need to be installed, and has its own method of installing them--so it does not need to run the normal installation commands. 
But installing the binary package does need to execute the pre-installation and post-installation commands. Programs to build binary packages work by extracting the pre-installation and post-installation commands. Here is one way of extracting the pre-installation commands (the '-s' option to 'make' is needed to silence messages about entering subdirectories): make -s -n install -o all \ PRE_INSTALL=pre-install \ POST_INSTALL=post-install \ NORMAL_INSTALL=normal-install \ | gawk -f pre-install.awk where the file 'pre-install.awk' could contain this: $0 ~ /^(normal-install|post-install)[ \t]*$/ {on = 0} on {print $0} $0 ~ /^pre-install[ \t]*$/ {on = 1}  File: standards.info, Node: Releases, Prev: Makefile Conventions, Up: Managing Releases 7.3 Making Releases =================== You should identify each release with a pair of version numbers, a major version and a minor. We have no objection to using more than two numbers, but it is very unlikely that you really need them. Package the distribution of 'Foo version 69.96' up in a gzipped tar file with the name 'foo-69.96.tar.gz'. It should unpack into a subdirectory named 'foo-69.96'. Building and installing the program should never modify any of the files contained in the distribution. This means that all the files that form part of the program in any way must be classified into "source files" and "non-source files". Source files are written by humans and never changed automatically; non-source files are produced from source files by programs under the control of the Makefile. 
The distribution should contain a file named 'README' with a general overview of the package: * the name of the package; * the version number of the package, or refer to where in the package the version can be found; * a general description of what the package does; * a reference to the file 'INSTALL', which should in turn contain an explanation of the installation procedure; * a brief explanation of any unusual top-level directories or files, or other hints for readers to find their way around the source; * a reference to the file which contains the copying conditions. The GNU GPL, if used, should be in a file called 'COPYING'. If the GNU LGPL is used, it should be in a file called 'COPYING.LESSER'. Naturally, all the source files must be in the distribution. It is okay to include non-source files in the distribution along with the source files they are generated from, provided they are up-to-date with the source they are made from, and machine-independent, so that normal building of the distribution will never modify them. We commonly include non-source files produced by Autoconf, Automake, Bison, 'flex', TeX, and 'makeinfo'; this helps avoid unnecessary dependencies between our distributions, so that users can install whichever versions of whichever packages they like. Do not induce new dependencies on other software lightly. Non-source files that might actually be modified by building and installing the program should *never* be included in the distribution. So if you do distribute non-source files, always make sure they are up to date when you make a new distribution. Make sure that all the files in the distribution are world-readable, and that directories are world-readable and world-searchable (octal mode 755). We used to recommend that all directories in the distribution also be world-writable (octal mode 777), because ancient versions of 'tar' would otherwise not cope when extracting the archive as an unprivileged user. 
That can easily lead to security issues when creating the archive, however, so now we recommend against that. Don't include any symbolic links in the distribution itself. If the tar file contains symbolic links, then people cannot even unpack it on systems that don't support symbolic links. Also, don't use multiple names for one file in different directories, because certain file systems cannot handle this and that prevents unpacking the distribution. Try to make sure that all the file names will be unique on MS-DOS. A name on MS-DOS consists of up to 8 characters, optionally followed by a period and up to three characters. MS-DOS will truncate extra characters both before and after the period. Thus, 'foobarhacker.c' and 'foobarhacker.o' are not ambiguous; they are truncated to 'foobarha.c' and 'foobarha.o', which are distinct. Include in your distribution a copy of the 'texinfo.tex' you used to test print any '*.texinfo' or '*.texi' files. Likewise, if your program uses small GNU software packages like regex, getopt, obstack, or termcap, include them in the distribution file. Leaving them out would make the distribution file a little smaller at the expense of possible inconvenience to a user who doesn't know what other files to get.  File: standards.info, Node: References, Next: GNU Free Documentation License, Prev: Managing Releases, Up: Top 8 References to Non-Free Software and Documentation *************************************************** A GNU program should not recommend, promote, or grant legitimacy to the use of any non-free program. Proprietary software is a social and ethical problem, and our aim is to put an end to that problem. We can't stop some people from writing proprietary programs, or stop other people from using them, but we can and should refuse to advertise them to new potential customers, or to give the public the impression that their existence is legitimate. 
The GNU definition of free software is found on the GNU web site at <https://www.gnu.org/philosophy/free-sw.html>, and the definition of free documentation is found at <https://www.gnu.org/philosophy/free-doc.html>. The terms "free" and "non-free", used in this document, refer to those definitions. A list of important licenses and whether they qualify as free is in <https://www.gnu.org/licenses/license-list.html>. If it is not clear whether a license qualifies as free, please ask the GNU Project by writing to <licensing@gnu.org>. We will answer, and if the license is an important one, we will add it to the list. When a non-free program or system is well known, you can mention it in passing--that is harmless, since users who might want to use it probably already know about it. For instance, it is fine to explain how to build your package on top of some widely used non-free operating system, or how to use it together with some widely used non-free program, after first explaining how to use it on the GNU system. However, you should give only the necessary information to help those who already use the non-free program to use your program with it--don't give, or refer to, any further information about the proprietary program, and don't imply that the proprietary program enhances your program, or that its existence is in any way a good thing. The goal should be that people already using the proprietary program will get the advice they need about how to use your free program with it, while people who don't already use the proprietary program will not see anything likely to lead them to take an interest in it. You shouldn't recommend any non-free add-ons for the non-free program, but it is ok to mention free add-ons that help it to work with your program, and how to install the free add-ons even if that requires running some non-free program. If a non-free program or system is obscure in your program's domain, your program should not mention or support it at all, since doing so would tend to popularize the non-free program more than it popularizes your program. 
(You cannot hope to find many additional users for your program among the users of Foobar, if the existence of Foobar is not generally known among people who might want to use your program.) Sometimes a program is free software in itself but depends on a non-free platform in order to run. For instance, it used to be the case that many Java programs depended on some non-free Java libraries. (See <https://www.gnu.org/philosophy/java-trap.html>.) To recommend or promote such a program is to promote the other programs it needs; therefore, judge mentions of the former as if they were mentions of the latter. For this reason, we were careful about listing Java programs in the Free Software Directory: we wanted to avoid promoting the non-free Java libraries. Java no longer has this problem, but the general principle will remain the same: don't recommend, promote or legitimize programs that depend on non-free software to run. Some free programs strongly encourage the use of non-free software. A typical example is 'mplayer'. It is free software in itself, and the free code can handle some kinds of files. However, 'mplayer' recommends use of non-free codecs for other kinds of files, and users that install 'mplayer' are very likely to install those codecs along with it. To recommend 'mplayer' is, in effect, to promote use of the non-free codecs. Thus, you should not recommend programs that strongly encourage the use of non-free software. This is why we do not list 'mplayer' in the Free Software Directory. A GNU package should not refer the user to any non-free documentation for free software. Free documentation that can be included in free operating systems is essential for completing the GNU system, or any free operating system, so encouraging it is a priority; to recommend use of documentation that we are not allowed to include undermines the impetus for the community to produce documentation that we can include. So GNU packages should never recommend non-free documentation. 
By contrast, it is ok to refer to journal articles and textbooks in the comments of a program for explanation of how it functions, even though they are non-free. This is because we don't include such things in the GNU system even if they are free--they are outside the scope of what a software distribution needs to include. Referring to a web site that describes or recommends a non-free program is promoting that program, so please do not make links to (or mention by name) web sites that contain such material. This policy is relevant particularly for the web pages for a GNU package. What about chains of links? Following links from nearly any web site can lead eventually to promotion of non-free software; this is inherent in the nature of the web. Here's how we treat that. You should not refer to AT&T's web site if that recommends AT&T's non-free software packages; you should not refer to a page P that links to AT&T's site presenting it as a place to get some non-free program, because that part of the page P itself recommends and legitimizes the non-free program. However, if P contains a link to AT&T's web site for some other purpose (such as long-distance telephone service), that is no reason you should not link to P. A web page recommends a program in an implicit but particularly strong way if it requires users to run that program in order to use the page. Many pages contain Javascript code which they recommend in this way. This Javascript code may be free or nonfree, but nonfree is the usual case. If the purpose for which you would refer to the page cannot be carried out without running nonfree Javascript code, then you should not refer to it. Thus, if the purpose of referring to the page is for people to view a video, or subscribing to a mailing list, and the viewing or subscribing fail to work if the user's browser blocks the nonfree Javascript code, then don't refer to that page. 
The extreme case is that of web sites which depend on nonfree Javascript code even to _see_ the contents of the pages. Any site hosted on 'wix.com' has this problem, and so do some other sites. Referring people to such pages to read their contents is, in effect, urging them to run those nonfree programs--so please don't refer to those pages. (Such pages also break the Web, so they deserve condemnation for two reasons.) Instead, please quote excerpts from the page to make your point, or find another place to refer to that information.  File: standards.info, Node: GNU Free Documentation License, Next: Index, Prev: References, Up: Top Appendix A GNU Free Documentation License ***************************************** Version 1.3, 3 November 2008 Copyright (C) 2000, 2001, 2002, 2007, 2008 Free Software Foundation, Inc. <https://fsf.org/> Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. 0. PREAMBLE The purpose of this License is to make a manual, textbook, or other functional and useful document "free" in the sense of freedom: to assure everyone the effective freedom to copy and redistribute it, with or without modifying it, either commercially or noncommercially. Secondarily, this License preserves for the author and publisher a way to get credit for their work, while not being considered responsible for modifications made by others. This License is a kind of "copyleft", which means that derivative works of the document must themselves be free in the same sense. It complements the GNU General Public License, which is a copyleft license designed for free software. We have designed this License in order to use it for manuals for free software, because free software needs free documentation: a free program should come with manuals providing the same freedoms that the software does. 
But this License is not limited to software manuals; it can be used for any textual work, regardless of subject matter or whether it is published as a printed book. We recommend this License principally for works whose purpose is instruction or reference. 1. APPLICABILITY AND DEFINITIONS This License applies to any manual or other work, in any medium, that contains a notice placed by the copyright holder saying it can be distributed under the terms of this License. Such a notice grants a world-wide, royalty-free license, unlimited in duration, to use that work under the conditions stated herein. The "Document", below, refers to any such manual or work. Any member of the public is a licensee, and is addressed as "you". You accept the license if you copy, modify or distribute the work in a way requiring permission under copyright law. A "Modified Version" of the Document means any work containing the Document or a portion of it, either copied verbatim, or with modifications and/or translated into another language. A "Secondary Section" is a named appendix or a front-matter section of the Document that deals exclusively with the relationship of the publishers or authors of the Document to the Document's overall subject (or to related matters) and contains nothing that could fall directly within that overall subject. (Thus, if the Document is in part a textbook of mathematics, a Secondary Section may not explain any mathematics.) The relationship could be a matter of historical connection with the subject or with related matters, or of legal, commercial, philosophical, ethical or political position regarding them. The "Invariant Sections" are certain Secondary Sections whose titles are designated, as being those of Invariant Sections, in the notice that says that the Document is released under this License. If a section does not fit the above definition of Secondary then it is not allowed to be designated as Invariant. The Document may contain zero Invariant Sections. 
If the Document does not identify any Invariant Sections then there are none. The "Cover Texts" are certain short passages of text that are listed, as Front-Cover Texts or Back-Cover Texts, in the notice that says that the Document is released under this License. A Front-Cover Text may be at most 5 words, and a Back-Cover Text may be at most 25 words. A "Transparent" copy of the Document means a machine-readable copy, represented in a format whose specification is available to the general public, that is suitable for revising the document straightforwardly with generic text editors or (for images composed of pixels) generic paint programs or (for drawings) some widely available drawing editor, and that is suitable for input to text formatters or for automatic translation to a variety of formats suitable for input to text formatters. A copy made in an otherwise Transparent file format whose markup, or absence of markup, has been arranged to thwart or discourage subsequent modification by readers is not Transparent. An image format is not Transparent if used for any substantial amount of text. A copy that is not "Transparent" is called "Opaque". Examples of suitable formats for Transparent copies include plain ASCII without markup, Texinfo input format, LaTeX input format, SGML or XML using a publicly available DTD, and standard-conforming simple HTML, PostScript or PDF designed for human modification. Examples of transparent image formats include PNG, XCF and JPG. Opaque formats include proprietary formats that can be read and edited only by proprietary word processors, SGML or XML for which the DTD and/or processing tools are not generally available, and the machine-generated HTML, PostScript or PDF produced by some word processors for output purposes only. The "Title Page" means, for a printed book, the title page itself, plus such following pages as are needed to hold, legibly, the material this License requires to appear in the title page. 
For works in formats which do not have any title page as such, "Title Page" means the text near the most prominent appearance of the work's title, preceding the beginning of the body of the text. The "publisher" means any person or entity that distributes copies of the Document to the public. A section "Entitled XYZ" means a named subunit of the Document whose title either is precisely XYZ or contains XYZ in parentheses following text that translates XYZ in another language. (Here XYZ stands for a specific section name mentioned below, such as "Acknowledgements", "Dedications", "Endorsements", or "History".) To "Preserve the Title" of such a section when you modify the Document means that it remains a section "Entitled XYZ" according to this definition. The Document may include Warranty Disclaimers next to the notice which states that this License applies to the Document. These Warranty Disclaimers are considered to be included by reference in this License, but only as regards disclaiming warranties: any other implication that these Warranty Disclaimers may have is void and has no effect on the meaning of this License. 2. VERBATIM COPYING You may copy and distribute the Document in any medium, either commercially or noncommercially, provided that this License, the copyright notices, and the license notice saying this License applies to the Document are reproduced in all copies, and that you add no other conditions whatsoever to those of this License. You may not use technical measures to obstruct or control the reading or further copying of the copies you make or distribute. However, you may accept compensation in exchange for copies. If you distribute a large enough number of copies you must also follow the conditions in section 3. You may also lend copies, under the same conditions stated above, and you may publicly display copies. 3. 
COPYING IN QUANTITY If you publish printed copies (or copies in media that commonly have printed covers) of the Document, numbering more than 100, and the Document's license notice requires Cover Texts, you must enclose the copies in covers that carry, clearly and legibly, all these Cover Texts: Front-Cover Texts on the front cover, and Back-Cover Texts on the back cover. Both covers must also clearly and legibly identify you as the publisher of these copies. The front cover must present the full title with all words of the title equally prominent and visible. You may add other material on the covers in addition. Copying with changes limited to the covers, as long as they preserve the title of the Document and satisfy these conditions, can be treated as verbatim copying in other respects. If the required texts for either cover are too voluminous to fit legibly, you should put the first ones listed (as many as fit reasonably) on the actual cover, and continue the rest onto adjacent pages. If you publish or distribute Opaque copies of the Document numbering more than 100, you must either include a machine-readable Transparent copy along with each Opaque copy, or state in or with each Opaque copy a computer-network location from which the general network-using public has access to download using public-standard network protocols a complete Transparent copy of the Document, free of added material. If you use the latter option, you must take reasonably prudent steps, when you begin distribution of Opaque copies in quantity, to ensure that this Transparent copy will remain thus accessible at the stated location until at least one year after the last time you distribute an Opaque copy (directly or through your agents or retailers) of that edition to the public. It is requested, but not required, that you contact the authors of the Document well before redistributing any large number of copies, to give them a chance to provide you with an updated version of the Document. 
4. MODIFICATIONS You may copy and distribute a Modified Version of the Document under the conditions of sections 2 and 3 above, provided that you release the Modified Version under precisely this License, with the Modified Version filling the role of the Document, thus licensing distribution and modification of the Modified Version to whoever possesses a copy of it. In addition, you must do these things in the Modified Version: A. Use in the Title Page (and on the covers, if any) a title distinct from that of the Document, and from those of previous versions (which should, if there were any, be listed in the History section of the Document). You may use the same title as a previous version if the original publisher of that version gives permission. B. List on the Title Page, as authors, one or more persons or entities responsible for authorship of the modifications in the Modified Version, together with at least five of the principal authors of the Document (all of its principal authors, if it has fewer than five), unless they release you from this requirement. C. State on the Title page the name of the publisher of the Modified Version, as the publisher. D. Preserve all the copyright notices of the Document. E. Add an appropriate copyright notice for your modifications adjacent to the other copyright notices. F. Include, immediately after the copyright notices, a license notice giving the public permission to use the Modified Version under the terms of this License, in the form shown in the Addendum below. G. Preserve in that license notice the full lists of Invariant Sections and required Cover Texts given in the Document's license notice. H. Include an unaltered copy of this License. I. Preserve the section Entitled "History", Preserve its Title, and add to it an item stating at least the title, year, new authors, and publisher of the Modified Version as given on the Title Page. 
If there is no section Entitled "History" in the Document, create one stating the title, year, authors, and publisher of the Document as given on its Title Page, then add an item describing the Modified Version as stated in the previous sentence. J. Preserve the network location, if any, given in the Document for public access to a Transparent copy of the Document, and likewise the network locations given in the Document for previous versions it was based on. These may be placed in the "History" section. You may omit a network location for a work that was published at least four years before the Document itself, or if the original publisher of the version it refers to gives permission. K. For any section Entitled "Acknowledgements" or "Dedications", Preserve the Title of the section, and preserve in the section all the substance and tone of each of the contributor acknowledgements and/or dedications given therein. L. Preserve all the Invariant Sections of the Document, unaltered in their text and in their titles. Section numbers or the equivalent are not considered part of the section titles. M. Delete any section Entitled "Endorsements". Such a section may not be included in the Modified Version. N. Do not retitle any existing section to be Entitled "Endorsements" or to conflict in title with any Invariant Section. O. Preserve any Warranty Disclaimers. If the Modified Version includes new front-matter sections or appendices that qualify as Secondary Sections and contain no material copied from the Document, you may at your option designate some or all of these sections as invariant. To do this, add their titles to the list of Invariant Sections in the Modified Version's license notice. These titles must be distinct from any other section titles. 
You may add a section Entitled "Endorsements", provided it contains nothing but endorsements of your Modified Version by various parties--for example, statements of peer review or that the text has been approved by an organization as the authoritative definition of a standard. You may add a passage of up to five words as a Front-Cover Text, and a passage of up to 25 words as a Back-Cover Text, to the end of the list of Cover Texts in the Modified Version. Only one passage of Front-Cover Text and one of Back-Cover Text may be added by (or through arrangements made by) any one entity. If the Document already includes a cover text for the same cover, previously added by you or by arrangement made by the same entity you are acting on behalf of, you may not add another; but you may replace the old one, on explicit permission from the previous publisher that added the old one. The author(s) and publisher(s) of the Document do not by this License give permission to use their names for publicity for or to assert or imply endorsement of any Modified Version. 5. COMBINING DOCUMENTS You may combine the Document with other documents released under this License, under the terms defined in section 4 above for modified versions, provided that you include in the combination all of the Invariant Sections of all of the original documents, unmodified, and list them all as Invariant Sections of your combined work in its license notice, and that you preserve all their Warranty Disclaimers. The combined work need only contain one copy of this License, and multiple identical Invariant Sections may be replaced with a single copy. If there are multiple Invariant Sections with the same name but different contents, make the title of each such section unique by adding at the end of it, in parentheses, the name of the original author or publisher of that section if known, or else a unique number. 
Make the same adjustment to the section titles in the list of Invariant Sections in the license notice of the combined work. In the combination, you must combine any sections Entitled "History" in the various original documents, forming one section Entitled "History"; likewise combine any sections Entitled "Acknowledgements", and any sections Entitled "Dedications". You must delete all sections Entitled "Endorsements." 6. COLLECTIONS OF DOCUMENTS You may make a collection consisting of the Document and other documents released under this License, and replace the individual copies of this License in the various documents with a single copy that is included in the collection, provided that you follow the rules of this License for verbatim copying of each of the documents in all other respects. You may extract a single document from such a collection, and distribute it individually under this License, provided you insert a copy of this License into the extracted document, and follow this License in all other respects regarding verbatim copying of that document. 7. AGGREGATION WITH INDEPENDENT WORKS A compilation of the Document or its derivatives with other separate and independent documents or works, in or on a volume of a storage or distribution medium, is called an "aggregate" if the copyright resulting from the compilation is not used to limit the legal rights of the compilation's users beyond what the individual works permit. When the Document is included in an aggregate, this License does not apply to the other works in the aggregate which are not themselves derivative works of the Document. If the Cover Text requirement of section 3 is applicable to these copies of the Document, then if the Document is less than one half of the entire aggregate, the Document's Cover Texts may be placed on covers that bracket the Document within the aggregate, or the electronic equivalent of covers if the Document is in electronic form. 
Otherwise they must appear on printed covers that bracket the whole aggregate. 8. TRANSLATION Translation is considered a kind of modification, so you may distribute translations of the Document under the terms of section 4. Replacing Invariant Sections with translations requires special permission from their copyright holders, but you may include translations of some or all Invariant Sections in addition to the original versions of these Invariant Sections. You may include a translation of this License, and all the license notices in the Document, and any Warranty Disclaimers, provided that you also include the original English version of this License and the original versions of those notices and disclaimers. In case of a disagreement between the translation and the original version of this License or a notice or disclaimer, the original version will prevail. If a section in the Document is Entitled "Acknowledgements", "Dedications", or "History", the requirement (section 4) to Preserve its Title (section 1) will typically require changing the actual title. 9. TERMINATION You may not copy, modify, sublicense, or distribute the Document except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, or distribute it is void, and will automatically terminate your rights under this License. However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. 
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, receipt of a copy of some or all of the same material does not give you any rights to use it. 10. FUTURE REVISIONS OF THIS LICENSE The Free Software Foundation may publish new, revised versions of the GNU Free Documentation License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. See . Each version of the License is given a distinguishing version number. If the Document specifies that a particular numbered version of this License "or any later version" applies to it, you have the option of following the terms and conditions either of that specified version or of any later version that has been published (not as a draft) by the Free Software Foundation. If the Document does not specify a version number of this License, you may choose any version ever published (not as a draft) by the Free Software Foundation. If the Document specifies that a proxy can decide which future versions of this License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Document. 11. RELICENSING "Massive Multiauthor Collaboration Site" (or "MMC Site") means any World Wide Web server that publishes copyrightable works and also provides prominent facilities for anybody to edit those works. 
A public wiki that anybody can edit is an example of such a server. A "Massive Multiauthor Collaboration" (or "MMC") contained in the site means any set of copyrightable works thus published on the MMC site. "CC-BY-SA" means the Creative Commons Attribution-Share Alike 3.0 license published by Creative Commons Corporation, a not-for-profit corporation with a principal place of business in San Francisco, California, as well as future copyleft versions of that license published by that same organization. "Incorporate" means to publish or republish a Document, in whole or in part, as part of another Document. An MMC is "eligible for relicensing" if it is licensed under this License, and if all works that were first published under this License somewhere other than this MMC, and subsequently incorporated in whole or in part into the MMC, (1) had no cover texts or invariant sections, and (2) were thus incorporated prior to November 1, 2008. The operator of an MMC Site may republish an MMC contained in the site under CC-BY-SA on the same site at any time before August 1, 2009, provided the MMC is eligible for relicensing. ADDENDUM: How to use this License for your documents ==================================================== To use this License in a document you have written, include a copy of the License in the document and put the following copyright and license notices just after the title page: Copyright (C) YEAR YOUR NAME. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License''. If you have Invariant Sections, Front-Cover Texts and Back-Cover Texts, replace the "with...Texts." 
line with this: with the Invariant Sections being LIST THEIR TITLES, with the Front-Cover Texts being LIST, and with the Back-Cover Texts being LIST. If you have Invariant Sections without Cover Texts, or some other combination of the three, merge those two alternatives to suit the situation. If your document contains nontrivial examples of program code, we recommend releasing these examples in parallel under your choice of free software license, such as the GNU General Public License, to permit their use in free software.  File: standards.info, Node: Index, Prev: GNU Free Documentation License, Up: Top Index ***** [index] * Menu: * #endif, commenting: Comments. (line 60) * --help output: --help. (line 6) * --version output: --version. (line 6) * -Wall compiler option: Syntactic Conventions. (line 10) * accepting contributions: Contributions. (line 6) * address for bug reports: --help. (line 11) * ANSI C standard: Standard C. (line 6) * arbitrary limits on data: Semantics. (line 6) * ASCII characters: Character Set. (line 6) * autoconf: System Portability. (line 23) * avoiding proprietary code: Reading Non-Free Code. (line 6) * batch of changes, in a change log: Change Log Concepts. (line 6) * behavior, dependent on program's name: User Interfaces. (line 6) * binary packages: Install Command Categories. (line 80) * bindir: Directory Variables. (line 57) * braces, in C source: Formatting. (line 9) * bug reports: --help. (line 11) * bug-standards@gnu.org email address: Preface. (line 30) * C compatibility: Compatibility. (line 6) * C library functions, and portability: System Functions. (line 6) * canonical name of a program: --version. (line 12) * casting pointers to integers: CPU Portability. (line 53) * CGI programs, standard options for: Command-Line Interfaces. (line 31) * change logs: Change Logs. (line 6) * change logs, conditional changes: Conditional Changes. (line 6) * change logs, style: Style of Change Logs. 
(line 6) * changeset, in a change log: Change Log Concepts. (line 6) * character set: Character Set. (line 6) * clang: Syntactic Conventions. (line 17) * command-line arguments, decoding: Semantics. (line 37) * command-line interface: Command-Line Interfaces. (line 6) * commenting: Comments. (line 6) * compatibility with C and POSIX standards: Compatibility. (line 6) * compiler warnings: Syntactic Conventions. (line 10) * conditional changes, and change logs: Conditional Changes. (line 6) * conditionals, comments for: Comments. (line 60) * configure: Configuration. (line 6) * control-L: Formatting. (line 131) * conventions for makefiles: Makefile Conventions. (line 6) * CORBA: Graphical Interfaces. (line 17) * credits for manuals: Manual Credits. (line 6) * D-bus: Graphical Interfaces. (line 17) * data structures, in Gnulib: System Functions. (line 44) * data types, and portability: CPU Portability. (line 6) * description, change log entry: Change Log Concepts. (line 22) * DESTDIR: DESTDIR. (line 6) * directories, creating installation: Directory Variables. (line 20) * documentation: Documentation. (line 6) * doschk: Names. (line 38) * double quote: Quote Characters. (line 6) * downloading this manual: Preface. (line 14) * dynamic plug-ins: Dynamic Plug-In Interfaces. (line 6) * encodings: Character Set. (line 6) * enum types, formatting: Formatting. (line 48) * error messages: Semantics. (line 16) * error messages, formatting: Errors. (line 6) * error messages, in Gnulib: System Functions. (line 44) * exec_prefix: Directory Variables. (line 39) * expressions, splitting: Formatting. (line 94) * FDL, GNU Free Documentation License: GNU Free Documentation License. (line 6) * file usage: File Usage. (line 6) * file-name limitations: Names. (line 38) * formatting error messages: Errors. (line 6) * formatting source code: Formatting. (line 6) * formfeed: Formatting. (line 131) * function argument, declaring: Syntactic Conventions. 
(line 6) * function definitions, formatting: Formatting. (line 9) * function prototypes: Standard C. (line 22) * getopt: Command-Line Interfaces. (line 6) * gettext: Internationalization. (line 6) * GNOME: Graphical Interfaces. (line 17) * GNOME and Guile: Source Language. (line 24) * Gnulib: System Functions. (line 37) * gnustandards project repository: Preface. (line 30) * gnustandards-commit@gnu.org mailing list: Preface. (line 24) * GNUstep: Graphical Interfaces. (line 6) * graphical user interface: Graphical Interfaces. (line 6) * grave accent: Quote Characters. (line 6) * GTK+: Graphical Interfaces. (line 6) * Guile: Source Language. (line 24) * header line, change log entry: Change Log Concepts. (line 14) * implicit int: Syntactic Conventions. (line 6) * impossible conditions: Semantics. (line 63) * installation directories, creating: Directory Variables. (line 20) * installations, staged: DESTDIR. (line 6) * interface styles: Graphical Interfaces. (line 6) * internationalization: Internationalization. (line 6) * keyboard interface: Graphical Interfaces. (line 17) * LDAP: OID Allocations. (line 6) * left quote: Quote Characters. (line 6) * legal aspects: Legal Issues. (line 6) * legal papers: Contributions. (line 6) * length of source lines: Formatting. (line 6) * libexecdir: Directory Variables. (line 70) * libiconv: Semantics. (line 11) * libraries: Libraries. (line 6) * library functions, and portability: System Functions. (line 6) * library interface: Graphical Interfaces. (line 17) * license for manuals: License for Manuals. (line 6) * line length: Formatting. (line 6) * lint: Syntactic Conventions. (line 17) * locale-specific quote characters: Quote Characters. (line 6) * long option names: Option Table. (line 6) * long-named options: Command-Line Interfaces. (line 12) * makefile, conventions for: Makefile Conventions. (line 6) * malloc return value: Semantics. (line 22) * man pages: Man Pages. (line 6) * manual structure: Manual Structure Details. 
(line 6) * memory allocation failure: Semantics. (line 22) * memory leak: Memory Usage. (line 23) * memory usage: Memory Usage. (line 6) * message text, and internationalization: Internationalization. (line 29) * mmap: Mmap. (line 6) * multiple variables in a line: Syntactic Conventions. (line 43) * names of variables, functions, and files: Names. (line 6) * NEWS file: NEWS File. (line 6) * non-ASCII characters: Character Set. (line 6) * non-POSIX systems, and portability: System Portability. (line 32) * non-standard extensions: Using Extensions. (line 6) * NUL characters: Semantics. (line 11) * OID allocations for GNU: OID Allocations. (line 6) * open brace: Formatting. (line 9) * opening quote: Quote Characters. (line 6) * optional features, configure-time: Configuration. (line 98) * options for compatibility: Compatibility. (line 14) * options, standard command-line: Command-Line Interfaces. (line 31) * output device and program's behavior: User Interfaces. (line 17) * packaging: Releases. (line 6) * PATH_INFO, specifying standard options as: Command-Line Interfaces. (line 31) * plug-ins: Dynamic Plug-In Interfaces. (line 6) * plugin_is_GPL_compatible: Dynamic Plug-In Interfaces. (line 17) * portability, and data types: CPU Portability. (line 6) * portability, and library functions: System Functions. (line 6) * portability, between system types: System Portability. (line 6) * POSIX compatibility: Compatibility. (line 6) * POSIX functions, and portability: System Functions. (line 6) * POSIXLY_CORRECT, environment variable: Compatibility. (line 21) * post-installation commands: Install Command Categories. (line 6) * pre-installation commands: Install Command Categories. (line 6) * prefix: Directory Variables. (line 29) * program configuration: Configuration. (line 6) * program design: Design Advice. (line 6) * program name and its behavior: User Interfaces. (line 6) * program's canonical name: --version. (line 12) * programming languages: Source Language. 
(line 6) * proprietary programs: Reading Non-Free Code. (line 6) * quote characters: Quote Characters. (line 6) * README file: Releases. (line 21) * references to non-free material: References. (line 6) * releasing: Managing Releases. (line 6) * right quote: Quote Characters. (line 6) * Savannah repository for gnustandards: Preface. (line 30) * sbindir: Directory Variables. (line 63) * signal handling: Semantics. (line 52) * single quote: Quote Characters. (line 6) * SNMP: OID Allocations. (line 6) * software forensics, and change logs: Change Logs. (line 15) * spaces before open-paren: Formatting. (line 88) * staged installs: DESTDIR. (line 6) * standard command-line options: Command-Line Interfaces. (line 31) * standards for makefiles: Makefile Conventions. (line 6) * struct types, formatting: Formatting. (line 48) * syntactic conventions: Syntactic Conventions. (line 6) * table of long options: Option Table. (line 6) * temporary files: Semantics. (line 77) * temporary variables: Syntactic Conventions. (line 31) * texinfo.tex, in a distribution: Releases. (line 78) * title, change log entry: Change Log Concepts. (line 14) * TMPDIR environment variable: Semantics. (line 77) * trademarks: Trademarks. (line 6) * user interface styles: Graphical Interfaces. (line 6) * valgrind: Memory Usage. (line 23) * VCS: Change Logs. (line 40) * version control system, for keeping change logs: Change Logs. (line 40) * version numbers, for releases: Releases. (line 6) * where to obtain standards.texi: Preface. (line 14) * X.509: OID Allocations. (line 6) * xmalloc, in Gnulib: System Functions. 
(line 44)  Tag Table: Node: Top857 Node: Preface2193 Node: Legal Issues4909 Node: Reading Non-Free Code5379 Node: Contributions7108 Node: Trademarks9281 Node: Design Advice10751 Node: Source Language11343 Node: Compatibility12905 Node: Using Extensions14533 Node: Standard C16110 Node: Conditional Compilation18710 Node: Program Behavior20108 Node: Non-GNU Standards21434 Node: Semantics23715 Node: Libraries28216 Node: Errors29461 Node: User Interfaces32031 Node: Finding Program Files34425 Node: Graphical Interfaces37726 Node: Command-Line Interfaces39136 Node: --version41180 Node: --help47117 Node: Dynamic Plug-In Interfaces47993 Node: Option Table49892 Node: OID Allocations64852 Node: Memory Usage66777 Node: File Usage68054 Node: Writing C68804 Node: Formatting69783 Node: Comments74401 Node: Syntactic Conventions77952 Node: Names81754 Node: System Portability83966 Node: CPU Portability86788 Node: System Functions89491 Node: Internationalization92036 Node: Character Set96036 Node: Quote Characters96891 Node: Mmap98454 Node: Documentation99195 Node: GNU Manuals100301 Node: Doc Strings and Manuals108036 Node: Manual Structure Details109589 Node: License for Manuals111007 Node: Manual Credits111982 Node: Printed Manuals112375 Node: NEWS File113062 Node: Change Logs113740 Node: Change Log Concepts117033 Node: Style of Change Logs123314 Node: Simple Changes127856 Node: Conditional Changes130008 Node: Indicating the Part Changed132458 Node: Man Pages132985 Node: Reading other Manuals135157 Node: Managing Releases135948 Node: Configuration136729 Node: Makefile Conventions145366 Node: Makefile Basics146365 Node: Utilities in Makefiles149539 Node: Command Variables152045 Node: DESTDIR155292 Node: Directory Variables157466 Node: Standard Targets172832 Node: Install Command Categories186934 Node: Releases191467 Node: References195688 Node: GNU Free Documentation License203012 Node: Index228161  End Tag Table  Local Variables: coding: utf-8 End: 
autoconf-2.71/lib/0000755000000000000000000000000014004625653011003 500000000000000autoconf-2.71/lib/Autom4te/0000755000000000000000000000000014004625653012505 500000000000000autoconf-2.71/lib/Autom4te/C4che.pm0000644000000000000000000001222514004621270013702 00000000000000# autoconf -- create `configure' using m4 macros # Copyright (C) 2003, 2006, 2009-2017, 2020-2021 Free Software # Foundation, Inc. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . package Autom4te::C4che; =head1 NAME Autom4te::C4che - a single m4 run request =head1 SYNOPSIS use Autom4te::C4che; =head1 DESCRIPTION This Perl module handles the cache of M4 runs used by autom4te. =cut use 5.006; use strict; use warnings FATAL => 'all'; use Carp; use Data::Dumper; use Autom4te::Request; =over 4 =item @request List of requests. Must be a package global so it can be accessed by code evaluated via 'eval', below. =cut our @request; =item C<$req = Autom4te::C4che-Eretrieve (%attr)> Find a request with the same path and input. =cut sub retrieve($%) { my ($self, %attr) = @_; foreach (@request) { # Same path. next if join ("\n", @{$_->path}) ne join ("\n", @{$attr{path}}); # Same inputs. next if join ("\n", @{$_->input}) ne join ("\n", @{$attr{input}}); # Found it. return $_; } return undef; } =item C<$req = Autom4te::C4che-Eregister (%attr)> Create and register a request for these path and input. 
=cut # $REQUEST-OBJ # register ($SELF, %ATTR) # ----------------------- # NEW should not be called directly. # Private. sub register ($%) { my ($self, %attr) = @_; # path and input are the only ID for a request object. my $obj = new Autom4te::Request ('path' => $attr{path}, 'input' => $attr{input}); push @request, $obj; # Assign an id for cache file. $obj->id ("$#request"); return $obj; } =item C<$req = Autom4te::C4che-Erequest (%request)> Get (retrieve or create) a request for the path C<$request{path}> and the input C<$request{input}>. =cut # $REQUEST-OBJ # request($SELF, %REQUEST) # ------------------------ sub request ($%) { my ($self, %request) = @_; my $req = Autom4te::C4che->retrieve (%request) || Autom4te::C4che->register (%request); # If there are new traces to produce, then we are not valid. foreach (@{$request{'macro'}}) { if (! exists ${$req->macro}{$_}) { ${$req->macro}{$_} = 1; $req->valid (0); } } # It would be great to have $REQ check that it is up to date wrt # its dependencies, but that requires getting traces (to fetch the # included files), which is out of the scope of Request (currently?). return $req; } =item C<$string = Autom4te::C4che-Emarshall ()> Serialize all the current requests. =cut # marshall($SELF) # --------------- sub marshall ($) { my ($caller) = @_; my $res = ''; my $marshall = Data::Dumper->new ([\@request], [qw (*request)]); $marshall->Indent(2)->Terse(0); $res = $marshall->Dump . "\n"; return $res; } =item Csave ($file, $version)> Save the cache in the C<$file> file object. 
=cut # SAVE ($FILE, $VERSION) # ---------------------- sub save ($$) { my ($self, $file, $version) = @_; confess "cannot save a single request\n" if ref ($self); $file->seek (0, 0); $file->truncate (0); print $file "# This file was generated by Autom4te $version.\n", "# It contains the lists of macros which have been traced.\n", "# It can be safely removed.\n", "\n", $self->marshall; } =item Cgood_version ($file, $version)> Succeed if the cache from the C<$file> file object is of the given version. =cut # GOOD_VERSION ($FILE, $VERSION) # ------------------------------ sub good_version ($$) { my ($self, $file, $version) = @_; my ($line) = $file->getline; return defined ($line) && $line eq "# This file was generated by Autom4te $version.\n"; } =item Cload ($file)> Load the cache from the C<$file> file object. =cut # LOAD ($FILE) # ------------ sub load ($$) { my ($self, $file) = @_; my $fname = $file->name; confess "cannot load a single request\n" if ref ($self); my $contents = join "", $file->getlines; eval $contents; confess "cannot eval $fname: $@\n" if $@; } =head1 SEE ALSO L =head1 HISTORY Written by Akim Demaille EFE. =cut 1; # for require ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/lib/Autom4te/ChannelDefs.pm0000644000000000000000000003340513765663120015146 00000000000000# Copyright (C) 2002-2020 Free Software Foundation, Inc. 
# This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . package Autom4te::ChannelDefs; =head1 NAME Autom4te::ChannelDefs - channel definitions for Automake and helper functions =head1 SYNOPSIS use Autom4te::ChannelDefs; print Autom4te::ChannelDefs::usage (), "\n"; prog_error ($MESSAGE, [%OPTIONS]); error ($WHERE, $MESSAGE, [%OPTIONS]); error ($MESSAGE); fatal ($WHERE, $MESSAGE, [%OPTIONS]); fatal ($MESSAGE); verb ($MESSAGE, [%OPTIONS]); switch_warning ($CATEGORY); parse_WARNINGS (); parse_warnings ($OPTION, @ARGUMENT); Autom4te::ChannelDefs::set_strictness ($STRICTNESS_NAME); =head1 DESCRIPTION This package defines channels that can be used in Automake to output diagnostics and other messages (via C). It also defines some helper function to enable or disable these channels, and some shorthand function to output on specific channels. =cut use 5.006; use strict; use warnings FATAL => 'all'; use Exporter; use Autom4te::Channels; use Autom4te::Config; BEGIN { if ($perl_threads) { require threads; import threads; } } our @ISA = qw (Exporter); our @EXPORT = qw (&prog_error &error &fatal &verb &switch_warning &parse_WARNINGS &parse_warnings &merge_WARNINGS); =head2 CHANNELS The following channels can be used as the first argument of C. For some of them we list a shorthand function that makes the code more readable. =over 4 =item C Fatal errors. Use C<&fatal> to send messages over this channel. =item C Common errors. 
Use C<&error> to send messages over this channel. =item C Errors related to GNU Standards. =item C Errors related to GNU Standards that should be warnings in 'foreign' mode. =item C Errors related to GNITS Standards (silent by default). =item C Internal errors. Use C<&prog_error> to send messages over this channel. =item C Constructs compromising the cross-compilation of the package. =item C Warnings related to GNU Coding Standards. =item C Warnings about obsolete features. =item C Warnings about user redefinitions of Automake rules or variables (silent by default). =item C Warnings about non-portable constructs. =item C Warnings about recursive variable expansions (C<$(foo$(x))>). These are not universally supported, but are more portable than the other non-portable constructs diagnosed by C<-Wportability>. These warnings are turned on by C<-Wportability> but can then be turned off separately by C<-Wno-portability-recursive>. =item C Extra warnings about non-portable constructs covering obscure tools. =item C Warnings about weird syntax, unused variables, typos... =item C Warnings about unsupported (or mis-supported) features. =item C Messages output in C<--verbose> mode. Use C<&verb> to send such messages. =item C Informative messages. =back =cut # Initialize our list of error/warning channels. # Do not forget to update &usage and the manual # if you add or change a warning channel. register_channel 'fatal', type => 'fatal', uniq_part => UP_NONE, ordered => 0; register_channel 'error', type => 'error'; register_channel 'error-gnu', type => 'error'; register_channel 'error-gnu/warn', type => 'error'; register_channel 'error-gnits', type => 'error', silent => 1; register_channel 'automake', type => 'fatal', backtrace => 1, header => ("####################\n" . "## Internal Error ##\n" . 
"####################\n"), footer => "\nPlease contact <$PACKAGE_BUGREPORT>.", uniq_part => UP_NONE, ordered => 0; register_channel 'cross', type => 'warning', silent => 1; register_channel 'gnu', type => 'warning'; register_channel 'obsolete', type => 'warning'; register_channel 'override', type => 'warning', silent => 1; register_channel 'portability', type => 'warning', silent => 1; register_channel 'extra-portability', type => 'warning', silent => 1; register_channel 'portability-recursive', type => 'warning', silent => 1; register_channel 'syntax', type => 'warning'; register_channel 'unsupported', type => 'warning'; register_channel 'verb', type => 'debug', silent => 1, uniq_part => UP_NONE, ordered => 0; register_channel 'note', type => 'debug', silent => 0; setup_channel_type 'warning', header => 'warning: '; setup_channel_type 'error', header => 'error: '; setup_channel_type 'fatal', header => 'error: '; =head2 FUNCTIONS =over 4 =item C Return the warning category descriptions. =cut sub usage () { return "Warning categories include: cross cross compilation issues gnu GNU coding standards (default in gnu and gnits modes) obsolete obsolete features or constructions (default) override user redefinitions of Automake rules or variables portability portability issues (default in gnu and gnits modes) portability-recursive nested Make variables (default with -Wportability) extra-portability extra portability issues related to obscure tools syntax dubious syntactic constructs (default) unsupported unsupported or incomplete features (default) all all the warnings no-CATEGORY turn off warnings in CATEGORY none turn off all the warnings error treat warnings as errors"; } =item C Signal a programming error (on channel C), display C<$MESSAGE>, and exit 1. =cut sub prog_error ($;%) { my ($msg, %opts) = @_; msg 'automake', '', $msg, %opts; } =item C =item C Uncategorized errors. 
=cut

sub error ($;$%)
{
  my ($where, $msg, %opts) = @_;
  msg ('error', $where, $msg, %opts);
}

=item C<fatal ($WHERE, $MESSAGE, [%OPTIONS])>

=item C<fatal ($MESSAGE)>

Fatal errors.

=cut

sub fatal ($;$%)
{
  my ($where, $msg, %opts) = @_;
  msg ('fatal', $where, $msg, %opts);
}

=item C<verb ($MESSAGE, [%OPTIONS])>

C<--verbose> messages.

=cut

sub verb ($;%)
{
  my ($msg, %opts) = @_;
  # Tag each message with the emitting thread when running threaded.
  $msg = "thread " . threads->tid . ": " . $msg if $perl_threads;
  msg 'verb', '', $msg, %opts;
}

=item C<switch_warning ($CATEGORY)>

If C<$CATEGORY> is C<mumble>, turn on channel C<mumble>.
If it is C<no-mumble>, turn C<mumble> off.
Else handle C<all> and C<none> for completeness.
Returns 0 on success, 1 if C<$CATEGORY> is not a known warning channel.

=cut

sub switch_warning ($)
{
  my ($cat) = @_;
  my $has_no = 0;

  # Strip a leading 'no-': $has_no records that we are disabling.
  if ($cat =~ /^no-(.*)$/)
    {
      $cat = $1;
      $has_no = 1;
    }

  if ($cat eq 'all')
    {
      setup_channel_type 'warning', silent => $has_no;
    }
  elsif ($cat eq 'none')
    {
      setup_channel_type 'warning', silent => ! $has_no;
    }
  elsif ($cat eq 'error')
    {
      $warnings_are_errors = ! $has_no;
      # Set exit code if Perl warns about something
      # (like uninitialized variables).
      $SIG{"__WARN__"} =
        $has_no ? 'DEFAULT' : sub { print STDERR @_; $exit_code = 1; };
    }
  elsif (channel_type ($cat) eq 'warning')
    {
      setup_channel $cat, silent => $has_no;
      #
      # Handling of portability warnings is trickier.  For relevant tests,
      # see 'dollarvar2', 'extra-portability' and 'extra-portability3'.
      #
      # -Wportability-recursive and -Wno-portability-recursive should not
      # have any effect on other 'portability' or 'extra-portability'
      # warnings, so there's no need to handle them separately or ad-hoc.
      #
      if ($cat eq 'extra-portability' && ! $has_no) # -Wextra-portability
        {
          # -Wextra-portability must enable 'portability' and
          # 'portability-recursive' warnings.
          setup_channel 'portability', silent => 0;
          setup_channel 'portability-recursive', silent => 0;
        }
      if ($cat eq 'portability') # -Wportability or -Wno-portability
        {
          if ($has_no) # -Wno-portability
            {
              # -Wno-portability must disable 'extra-portability' and
              # 'portability-recursive' warnings.
              setup_channel 'portability-recursive', silent => 1;
              setup_channel 'extra-portability', silent => 1;
            }
          else # -Wportability
            {
              # -Wportability must enable 'portability-recursive'
              # warnings.  But it should have no influence over the
              # 'extra-portability' warnings.
              setup_channel 'portability-recursive', silent => 0;
            }
        }
    }
  else
    {
      # Unknown category: report failure to the caller.
      return 1;
    }
  return 0;
}

=item C<parse_WARNINGS ()>

Parse the WARNINGS environment variable.

=cut

# Used to communicate from parse_WARNINGS to parse_warnings.
our $_werror = 0;

sub parse_WARNINGS ()
{
  if (exists $ENV{'WARNINGS'})
    {
      # Ignore unknown categories.  This is required because WARNINGS
      # should be honored by many tools.
      # For the same reason, do not turn on -Werror at this point, just
      # record that we saw it; parse_warnings will turn on -Werror after
      # the command line has been processed.
      foreach (split (',', $ENV{'WARNINGS'}))
        {
          if (/^(no-)?error$/)
            {
              $_werror = !defined $1;
            }
          else
            {
              switch_warning $_;
            }
        }
    }
}

=item C<parse_warnings (@CATEGORIES)>

Parse the argument of C<--warning=CATEGORY> or C<-WCATEGORY>.
C<@CATEGORIES> is the accumulated set of warnings categories.

Use like this:

  Autom4te::GetOpt::parse_options (
    # ...
    'W|warnings=s' => \@warnings,
  )
  # possibly call set_strictness here
  parse_warnings @warnings;

=cut

sub parse_warnings (@)
{
  # Each argument may itself be a comma-separated list of categories.
  foreach my $cat (map { split ',' } @_)
    {
      if ($cat =~ /^(no-)?error$/)
        {
          $_werror = !defined $1;
        }
      elsif (switch_warning $cat)
        {
          msg 'unsupported', "unknown warning category '$cat'";
        }
    }
  # Apply -Werror last, so it wins over both WARNINGS and earlier
  # command-line categories.
  switch_warning ($_werror ? 'error' : 'no-error');
}

=item C<merge_WARNINGS (@CATEGORIES)>

Merge the warnings categories in the environment variable C<WARNINGS>
with the warnings categories in C<@CATEGORIES>, and return a new
value for C<WARNINGS>.  Values in C<@CATEGORIES> take precedence.
Use like this: local $ENV{WARNINGS} = merge_WARNINGS @additional_warnings; =cut sub merge_WARNINGS (@) { my $werror = ''; my $all_or_none = ''; my %warnings; my @categories = split /,/, $ENV{WARNINGS} || ''; push @categories, @_; foreach (@categories) { if (/^(?:no-)?error$/) { $werror = $_; } elsif (/^(?:all|none)$/) { $all_or_none = $_; } else { # The character class in the second match group is ASCII \S minus # comma. We are generous with this because category values may come # from WARNINGS and we don't want to assume what other programs' # syntaxes for warnings categories are. /^(no-|)([\w\[\]\/\\!"#$%&'()*+-.:;<=>?@^`{|}~]+)$/ or die "Invalid warnings category: $_"; $warnings{$2} = $1; } } my @final_warnings; if ($all_or_none) { push @final_warnings, $all_or_none; } else { foreach (sort keys %warnings) { push @final_warnings, $warnings{$_} . $_; } } if ($werror) { push @final_warnings, $werror; } return join (',', @final_warnings); } =item C Configure channels for strictness C<$STRICTNESS_NAME>. 
=cut sub set_strictness ($) { my ($name) = @_; if ($name eq 'gnu') { setup_channel 'error-gnu', silent => 0; setup_channel 'error-gnu/warn', silent => 0, type => 'error'; setup_channel 'error-gnits', silent => 1; setup_channel 'portability', silent => 0; setup_channel 'extra-portability', silent => 1; setup_channel 'gnu', silent => 0; } elsif ($name eq 'gnits') { setup_channel 'error-gnu', silent => 0; setup_channel 'error-gnu/warn', silent => 0, type => 'error'; setup_channel 'error-gnits', silent => 0; setup_channel 'portability', silent => 0; setup_channel 'extra-portability', silent => 1; setup_channel 'gnu', silent => 0; } elsif ($name eq 'foreign') { setup_channel 'error-gnu', silent => 1; setup_channel 'error-gnu/warn', silent => 0, type => 'warning'; setup_channel 'error-gnits', silent => 1; setup_channel 'portability', silent => 1; setup_channel 'extra-portability', silent => 1; setup_channel 'gnu', silent => 1; } else { prog_error "level '$name' not recognized"; } } =back =head1 SEE ALSO L =head1 HISTORY Written by Alexandre Duret-Lutz EFE. =cut 1; ### Setup "GNU" style for perl-mode and cperl-mode. ## Local Variables: ## perl-indent-level: 2 ## perl-continued-statement-offset: 2 ## perl-continued-brace-offset: 0 ## perl-brace-offset: 0 ## perl-brace-imaginary-offset: 0 ## perl-label-offset: -2 ## cperl-indent-level: 2 ## cperl-brace-offset: 0 ## cperl-continued-brace-offset: 0 ## cperl-label-offset: -2 ## cperl-extra-newline-before-brace: t ## cperl-merge-trailing-else: nil ## cperl-continued-statement-offset: 2 ## End: autoconf-2.71/lib/Autom4te/Channels.pm0000644000000000000000000004741613765663120014536 00000000000000# Copyright (C) 2002-2020 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

###############################################################
# The main copy of this file is in Automake's git repository. #
# Updates should be sent to automake-patches@gnu.org.         #
###############################################################

package Autom4te::Channels;

=head1 NAME

Autom4te::Channels - support functions for error and warning management

=head1 SYNOPSIS

  use Autom4te::Channels;

  # Register a channel to output warnings about unused variables.
  register_channel 'unused', type => 'warning';

  # Register a channel for system errors.
  register_channel 'system', type => 'error', exit_code => 4;

  # Output a message on channel 'unused'.
  msg 'unused', "$file:$line", "unused variable '$var'";

  # Make the 'unused' channel silent.
  setup_channel 'unused', silent => 1;

  # Turn on all channels of type 'warning'.
  setup_channel_type 'warning', silent => 0;

  # Redirect all channels to push messages on a Thread::Queue using
  # the specified serialization key.
  setup_channel_queue $queue, $key;

  # Output a message pending in a Thread::Queue.
  pop_channel_queue $queue;

  # Treat all warnings as errors.
  $warnings_are_errors = 1;

  # Exit with the greatest exit code encountered so far.
  exit $exit_code;

=head1 DESCRIPTION

This perl module provides support functions for handling diagnostic
channels in programs.  Channels can be registered to convey fatal,
error, warning, or debug messages.  Each channel has various options
(e.g. is the channel silent, should duplicate messages be removed,
etc.) that can also be overridden on a per-message basis.

=cut

use 5.006;
use strict;
use warnings FATAL => 'all';

use Carp;
use Exporter;
use File::Basename;

our @ISA = qw (Exporter);
# BUGFIX: '&register_channel' had been corrupted into '(R)ister_channel'
# (HTML-entity decoding of '&reg' during extraction), which would have
# broken the export of register_channel; restored.
our @EXPORT = qw ($exit_code $warnings_are_errors
                  &reset_local_duplicates &reset_global_duplicates
                  &register_channel &msg &exists_channel &channel_type
                  &setup_channel &setup_channel_type
                  &dup_channel_setup &drop_channel_setup
                  &buffer_messages &flush_messages
                  &setup_channel_queue &pop_channel_queue
                  US_GLOBAL US_LOCAL
                  UP_NONE UP_TEXT UP_LOC_TEXT);

# Registry of channel options, keyed by channel name.
our %channels;
# Base name of the running program, used to prefix diagnostics.
our $me = basename $0;

=head2 Global Variables

=over 4

=item C<$exit_code>

The greatest exit code seen so far.  C<$exit_code> is updated from
the C<exit_code> options of C<fatal> and C<error> channels.

=cut

our $exit_code = 0;

=item C<$warnings_are_errors>

Set this variable to 1 if warning messages should be treated as
errors (i.e. if they should update C<$exit_code>).

=cut

our $warnings_are_errors = 0;

=back

=head2 Constants

=over 4

=item C<UP_NONE>, C<UP_TEXT>, C<UP_LOC_TEXT>

Possible values for the C<uniq_part> options.  This selects the part
of the message that should be considered when filtering out
duplicates.  If C<UP_LOC_TEXT> is used, the location and the
explanation message are used for filtering.  If C<UP_TEXT> is used,
only the explanation message is used (so the same message will be
filtered out if it appears at different locations).  C<UP_NONE>
means that duplicate messages should be output.

=cut

use constant UP_NONE => 0;
use constant UP_TEXT => 1;
use constant UP_LOC_TEXT => 2;

=item C<US_GLOBAL>, C<US_LOCAL>

Possible values for the C<uniq_scope> options.  Use C<US_GLOBAL>
for error messages that should be printed only once during the
execution of the program, C<US_LOCAL> for message that should be
printed only once per file.  (Actually, C<Channels> does not do this
now when files are changed, it relies on you calling
C<reset_local_duplicates> when this happens.)

=cut

# possible values for uniq_scope
use constant US_LOCAL => 0;
use constant US_GLOBAL => 1;

=back

=head2 Options

Channels accept the options described below.  These options can be
passed as a hash to the C<register_channel>, C<setup_channel>, and
C<msg> functions.
The possible keys, with their default value are:

=over

=item C<type =E<gt> 'warning'>

The type of the channel.  One of C<'debug'>, C<'warning'>, C<'error'>, or
C<'fatal'>.  Fatal messages abort the program when they are output.
Error messages update the exit status.  Debug and warning messages are
harmless, except that warnings are treated as errors if
C<$warnings_are_errors> is set.

=item C<exit_code =E<gt> 1>

The value to update C<$exit_code> with when a fatal or error message
is emitted.  C<$exit_code> is also updated for warnings output when
C<$warnings_are_errors> is set.

=item C<file =E<gt> \*STDERR>

The file where the error should be output.

=item C<silent =E<gt> 0>

Whether the channel should be silent.  Use this to disable a
category of warning, for instance.

=item C<ordered =E<gt> 1>

Whether, with multi-threaded execution, the message should be queued
for ordered output.

=item C<uniq_part =E<gt> UP_LOC_TEXT>

The part of the message subject to duplicate filtering.  See the
documentation for the C<UP_NONE>, C<UP_TEXT>, and C<UP_LOC_TEXT>
constants above.

C<uniq_part> can also be set to an arbitrary string that will be used
instead of the message when considering duplicates.

=item C<uniq_scope =E<gt> US_LOCAL>

The scope of duplicate filtering.  See the documentation for the
C<US_LOCAL>, and C<US_GLOBAL> constants above.

=item C<header =E<gt> ''>

A string to prepend to each message emitted through this channel.
With partial messages, only the first part will have C<header>
prepended. =item C