mod_wsgi-3.4/.hgignore:

syntax: glob

Makefile
Makefile.in
autom4te.cache
config.log
config.status
.libs
*.la
*.lo
*.slo

mod_wsgi-3.4/.hgtags:

d64191d297791f783dd3678a9a35c1a0a4d6cf0d mod_wsgi-3.4c1

mod_wsgi-3.4/configure:

#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.61.
#
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
# This configure script is free software; the Free Software Foundation
# gives unlimited permission to copy, distribute and modify it.
## --------------------- ##
## M4sh Initialization.  ##
## --------------------- ##

# Be more Bourne compatible
DUALCASE=1; export DUALCASE # for MKS sh
if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
  emulate sh
  NULLCMD=:
  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
  # is contrary to our usage.  Disable this feature.
  alias -g '${1+"$@"}'='"$@"'
  setopt NO_GLOB_SUBST
else
  case `(set -o) 2>/dev/null` in
  *posix*) set -o posix ;;
esac
fi

# PATH needs CR
# Avoid depending upon Character Ranges.
as_cr_letters='abcdefghijklmnopqrstuvwxyz'
as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
as_cr_Letters=$as_cr_letters$as_cr_LETTERS
as_cr_digits='0123456789'
as_cr_alnum=$as_cr_Letters$as_cr_digits

# The user is always right.
if test "${PATH_SEPARATOR+set}" != set; then
  echo "#! /bin/sh" >conf$$.sh
  echo "exit 0" >>conf$$.sh
  chmod +x conf$$.sh
  if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
    PATH_SEPARATOR=';'
  else
    PATH_SEPARATOR=:
  fi
  rm -f conf$$.sh
fi

# Support unset when possible.
if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
  as_unset=unset
else
  as_unset=false
fi

# IFS
# We need space, tab and new line, in precisely that order.  Quoting is
# there to prevent editors from complaining about space-tab.
# (If _AS_PATH_WALK were called with IFS unset, it would disable word
# splitting by setting IFS to empty value.)
as_nl='
'
IFS=" "" 	$as_nl"

# Find who we are.  Look in the path if we contain no directory separator.
case $0 in
  *[\\/]* ) as_myself=$0 ;;
  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
for as_dir in $PATH
do
  IFS=$as_save_IFS
  test -z "$as_dir" && as_dir=.
  test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
done
IFS=$as_save_IFS
     ;;
esac
# We did not find ourselves, most probably we were run as `sh COMMAND'
# in which case we are not to be found in the path.
if test "x$as_myself" = x; then
  as_myself=$0
fi
if test ! -f "$as_myself"; then
  echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
  { (exit 1); exit 1; }
fi

# Work around bugs in pre-3.0 UWIN ksh.
for as_var in ENV MAIL MAILPATH
do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
done
PS1='$ '
PS2='> '
PS4='+ '

# NLS nuisances.
for as_var in \
  LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
  LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
  LC_TELEPHONE LC_TIME
do
  if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
    eval $as_var=C; export $as_var
  else
    ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
  fi
done

# Required to use basename.
if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH if test "x$CONFIG_SHELL" = x; then if (eval ":") 2>/dev/null; then as_have_required=yes else as_have_required=no fi if test $as_have_required = yes && (eval ": (as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=\$LINENO as_lineno_2=\$LINENO test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } ") 2> /dev/null; then : else as_candidate_shells= as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. case $as_dir in /*) for as_base in sh bash ksh sh5; do as_candidate_shells="$as_candidate_shells $as_dir/$as_base" done;; esac done IFS=$as_save_IFS for as_shell in $as_candidate_shells $SHELL; do # Try only shells that exist, to save several forks. if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { ("$as_shell") 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : _ASEOF }; then CONFIG_SHELL=$as_shell as_have_required=yes if { "$as_shell" 2> /dev/null <<\_ASEOF if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi : (as_func_return () { (exit $1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = "$1" ); then : else exitcode=1 echo positional parameters were not saved. 
fi test $exitcode = 0) || { (exit 1); exit 1; } ( as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } _ASEOF }; then break fi fi done if test "x$CONFIG_SHELL" != x; then for as_var in BASH_ENV ENV do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done export CONFIG_SHELL exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} fi if test $as_have_required = no; then echo This script requires a shell more modern than all the echo shells that I found on your system. Please install a echo modern shell, or manually run the script under such a echo shell if you do have one. { (exit 1); exit 1; } fi fi fi (eval "as_func_return () { (exit \$1) } as_func_success () { as_func_return 0 } as_func_failure () { as_func_return 1 } as_func_ret_success () { return 0 } as_func_ret_failure () { return 1 } exitcode=0 if as_func_success; then : else exitcode=1 echo as_func_success failed. fi if as_func_failure; then exitcode=1 echo as_func_failure succeeded. fi if as_func_ret_success; then : else exitcode=1 echo as_func_ret_success failed. fi if as_func_ret_failure; then exitcode=1 echo as_func_ret_failure succeeded. fi if ( set x; as_func_ret_success y && test x = \"\$1\" ); then : else exitcode=1 echo positional parameters were not saved. fi test \$exitcode = 0") || { echo No shell found that supports shell functions. echo Please tell autoconf@gnu.org about your system, echo including any error possibly output before this echo message } as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir fi echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... 
but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= ac_unique_file="mod_wsgi.c" ac_subst_vars='SHELL PATH_SEPARATOR PACKAGE_NAME PACKAGE_TARNAME PACKAGE_VERSION PACKAGE_STRING PACKAGE_BUGREPORT exec_prefix prefix program_transform_name bindir sbindir libexecdir datarootdir datadir sysconfdir sharedstatedir localstatedir includedir oldincludedir docdir infodir htmldir dvidir pdfdir psdir libdir localedir mandir DEFS ECHO_C ECHO_N ECHO_T LIBS build_alias host_alias target_alias APXS PYTHON CPPFLAGS CFLAGS LDFLAGS LDLIBS LIBEXECDIR LIBOBJS LTLIBOBJS' ac_subst_files='' ac_precious_vars='build_alias host_alias target_alias' # Initialize some variables set by options. ac_init_help= ac_init_version=false # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) 
bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` eval enable_$ac_feature=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid feature name: $ac_feature" >&2 { (exit 1); exit 1; }; } ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'` eval enable_$ac_feature=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package | sed 's/[-.]/_/g'` eval with_$ac_package=\$ac_optarg ;; -without-* | --without-*) ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid package name: $ac_package" >&2 { (exit 1); exit 1; }; } ac_package=`echo $ac_package | sed 's/[-.]/_/g'` eval with_$ac_package=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) { echo "$as_me: error: unrecognized option: $ac_option Try \`$0 --help' for more information." >&2 { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && { echo "$as_me: error: invalid variable name: $ac_envvar" >&2 { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option} ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` { echo "$as_me: error: missing argument to $ac_option" >&2 { (exit 1); exit 1; }; } fi # Be sure to have absolute directory names. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 { (exit 1); exit 1; }; } done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. 
# FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host. If a cross compiler is detected then cross compile mode will be used." >&2 elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || { echo "$as_me: error: Working directory cannot be determined" >&2 { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || { echo "$as_me: error: pwd does not report name of working directory" >&2 { (exit 1); exit 1; }; } # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$0" || $as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$0" : 'X\(//\)[^/]' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X"$0" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2 { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking...' 
messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Optional Features: --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --disable-framework disable mod_wsgi framework link --disable-embedded disable mod_wsgi embedded mode Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-apxs=NAME name of the apxs executable [apxs] --with-python=NAME name of the python executable [python] _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. 
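# ---------------------------------------------------------------------------
# Illustrative aside (added to this annotated listing; not part of the
# generated configure script): a typical invocation using the package
# options documented in the help text above.  The paths are examples only.
#
#   ./configure --with-apxs=/usr/local/apache2/bin/apxs \
#               --with-python=/usr/local/bin/python2.7 \
#               --disable-framework
#
# If --with-apxs / --with-python are omitted, configure searches PATH
# (plus /usr/local/apache/bin, /usr/sbin and /usr/local/bin) for apxs2,
# apxs and python, as shown further below.  --disable-embedded compiles
# embedded mode out by adding -DMOD_WSGI_DISABLE_EMBEDDED to CPPFLAGS.
# ---------------------------------------------------------------------------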
ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF configure generated by GNU Autoconf 2.61 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.61. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. ## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. 
else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac done done $as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } $as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo cat <<\_ASBOX ## ---------------- ## ## Cache variables. ## ## ---------------- ## _ASBOX echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo cat <<\_ASBOX ## ----------------- ## ## Output variables. ## ## ----------------- ## _ASBOX echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then cat <<\_ASBOX ## ------------------- ## ## File substitutions. ## ## ------------------- ## _ASBOX echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then cat <<\_ASBOX ## ----------- ## ## confdefs.h. ## ## ----------- ## _ASBOX echo cat confdefs.h echo fi test "$ac_signal" != 0 && echo "$as_me: caught signal $ac_signal" echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h # Predefined preprocessor variables. 
cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer explicitly selected file to automatically selected ones. if test -n "$CONFIG_SITE"; then set x "$CONFIG_SITE" elif test "x$prefix" != xNONE; then set x "$prefix/share/config.site" "$prefix/etc/config.site" else set x "$ac_default_prefix/share/config.site" \ "$ac_default_prefix/etc/config.site" fi shift for ac_site_file do if test -r "$ac_site_file"; then { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special # files actually), so we avoid doing that. if test -f "$cache_file"; then { echo "$as_me:$LINENO: loading cache $cache_file" >&5 echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { echo "$as_me:$LINENO: creating cache $cache_file" >&5 echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then { echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} { echo "$as_me:$LINENO: former value: $ac_old_val" >&5 echo "$as_me: former value: $ac_old_val" >&2;} { echo "$as_me:$LINENO: current value: $ac_new_val" >&5 echo "$as_me: current value: $ac_new_val" >&2;} ac_cache_corrupted=: fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. 
*) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 echo "$as_me: error: changes in the environment can compromise the build" >&2;} { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} { (exit 1); exit 1; }; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Check whether --enable-framework was given. if test "${enable_framework+set}" = set; then enableval=$enable_framework; ENABLE_FRAMEWORK=$enableval else ENABLE_FRAMEWORK=yes fi # Check whether --enable-embedded was given. if test "${enable_embedded+set}" = set; then enableval=$enable_embedded; ENABLE_EMBEDDED=$enableval else ENABLE_EMBEDDED=yes fi # Check whether --with-apxs was given. if test "${with_apxs+set}" = set; then withval=$with_apxs; APXS="$with_apxs" fi if test -z "${APXS}"; then for ac_prog in apxs2 apxs do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_path_APXS+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $APXS in [\\/]* | ?:[\\/]*) ac_cv_path_APXS="$APXS" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/local/apache/bin:/usr/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_APXS="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi APXS=$ac_cv_path_APXS if test -n "$APXS"; then { echo "$as_me:$LINENO: result: $APXS" >&5 echo "${ECHO_T}$APXS" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi test -n "$APXS" && break done test -n "$APXS" || APXS="apxs" fi { echo "$as_me:$LINENO: checking Apache version" >&5 echo $ECHO_N "checking Apache version... $ECHO_C" >&6; } HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`" if test -x ${HTTPD}; then HTTPD_VERSION=`${HTTPD} -v | awk '/version/ {print $3}' \ | awk -F/ '{print $2}'` else if test -f ${HTTPD_INCLUDEDIR}/ap_release.h; then HTTPD_VERSION=`grep '^#define AP_SERVER_MAJORVERSION_NUMBER ' \ ${HTTPD_INCLUDEDIR}/ap_release.h | sed -e \ 's/^#define AP_SERVER_MAJORVERSION_NUMBER \([0-9]\).*/\1.X/'` else if test -f ${HTTPD_INCLUDEDIR}/httpd.h; then HTTPD_VERSION=`grep '^#define APACHE_RELEASE ' \ ${HTTPD_INCLUDEDIR}/httpd.h | sed -e \ 's/^#define APACHE_RELEASE \([0-9]\).*/\1.X/'` else HTTPD_VERSION="2.?" fi fi fi { echo "$as_me:$LINENO: result: $HTTPD_VERSION" >&5 echo "${ECHO_T}$HTTPD_VERSION" >&6; } # Check whether --with-python was given. if test "${with_python+set}" = set; then withval=$with_python; PYTHON="$with_python" fi if test -z "${PYTHON}"; then for ac_prog in python do # Extract the first word of "$ac_prog", so it can be a program name with args. 
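# ---------------------------------------------------------------------------
# Illustrative aside (added to this annotated listing; not part of the
# generated configure script): the Apache probe just performed can be
# reproduced by hand with the same apxs queries; the values shown are
# examples only.
#
#   apxs -q SBINDIR       # e.g. /usr/sbin          - directory holding httpd
#   apxs -q TARGET        # e.g. httpd              - name of the server binary
#   apxs -q INCLUDEDIR    # e.g. /usr/include/httpd - location of ap_release.h
#   /usr/sbin/httpd -v    # "Server version: Apache/2.2.x ..." parsed by awk
#
# When the server binary is not executable, configure instead greps
# AP_SERVER_MAJORVERSION_NUMBER (or APACHE_RELEASE) out of the headers,
# as shown above, to decide between the 1.X and 2.X build templates.
# ---------------------------------------------------------------------------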
set dummy $ac_prog; ac_word=$2 { echo "$as_me:$LINENO: checking for $ac_word" >&5 echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; } if test "${ac_cv_path_PYTHON+set}" = set; then echo $ECHO_N "(cached) $ECHO_C" >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/local/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { echo "$as_me:$LINENO: result: $PYTHON" >&5 echo "${ECHO_T}$PYTHON" >&6; } else { echo "$as_me:$LINENO: result: no" >&5 echo "${ECHO_T}no" >&6; } fi test -n "$PYTHON" && break done test -n "$PYTHON" || PYTHON="python" fi PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write((sysconfig.get_config_var("VERSION")))'` CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write("-I" + sysconfig.get_config_var("INCLUDEPY"))'` CPPFLAGS2=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(" ".join(filter(lambda x: x.startswith("-D"), \ sysconfig.get_config_var("CFLAGS").split())))'` if test "${ENABLE_EMBEDDED}" != "yes"; then CPPFLAGS3="-DMOD_WSGI_DISABLE_EMBEDDED" else CPPFLAGS3="" fi CPPFLAGS="${CPPFLAGS} ${CPPFLAGS1} ${CPPFLAGS2} ${CPPFLAGS3}" PYTHONLIBDIR=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBDIR"))'` PYTHONCFGDIR=`${PYTHON} -c 'from sys import stdout; \ import distutils.sysconfig; \ stdout.write(distutils.sysconfig.get_python_lib(plat_specific=1, \ standard_lib=1) +"/config")'` PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` PYTHONFRAMEWORKPREFIX=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \ "${ENABLE_FRAMEWORK}" != "yes"; then LDFLAGS1="-L${PYTHONLIBDIR}" LDFLAGS2="-L${PYTHONCFGDIR}" LDLIBS1="-lpython${PYTHON_VERSION}" LDLIBS2=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` LDLIBS3=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("SYSLIBS"))'` else LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" VERSION="${PYTHON_VERSION}" STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}" LDFLAGS2=`${PYTHON} -c "from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var( \"LINKFORSHARED\").replace(\"${STRING}\", ''))"` LDLIBS1=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` fi CFLAGS1="" for arg in ${CFLAGS} do CFLAGS1="${CFLAGS1} -Wc,$arg" done CFLAGS2="" if test -x 
/usr/bin/lipo; then LDFLAGS3="" ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` for ARCH in $ARCHITECTURES; do CFLAGS2="${CFLAGS2} -Wc,'-arch ${ARCH}'" LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" done fi CFLAGS="${CFLAGS1} ${CFLAGS2}" LDFLAGS="${LDFLAGS} ${LDFLAGS1} ${LDFLAGS2} ${LDFLAGS3}" LDLIBS="${LDLIBS} ${LDLIBS1} ${LDLIBS2} ${LDLIBS3}" LIBEXECDIR="`${APXS} -q LIBEXECDIR`" HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'` rm -f Makefile.in ln -s posix-ap${HTTPD_MAJOR_VERSION}X.mk.in Makefile.in ac_config_files="$ac_config_files Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5 echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( *) $as_unset $ac_var ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes (double-quote # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && { echo "$as_me:$LINENO: updating cache $cache_file" >&5 echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. 
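# ---------------------------------------------------------------------------
# Illustrative aside (added to this annotated listing; not part of the
# generated configure script): the compiler and linker flags assembled above
# all come from Python's own build configuration and can be inspected with
# the same distutils.sysconfig calls configure runs; output is system
# dependent, the comments are examples only.
#
#   python -c 'from distutils import sysconfig; print(sysconfig.get_config_var("VERSION"))'     # e.g. 2.7 -> -lpython2.7
#   python -c 'from distutils import sysconfig; print(sysconfig.get_config_var("INCLUDEPY"))'   # becomes -I... in CPPFLAGS
#   python -c 'from distutils import sysconfig; print(sysconfig.get_config_var("LIBDIR"))'      # becomes -L... in LDFLAGS
#   python -c 'from distutils import sysconfig; print(sysconfig.get_config_var("LIBS"))'        # extra -l... added to LDLIBS
#
# On Mac OS X a framework Python reports PYTHONFRAMEWORK/PYTHONFRAMEWORKDIR
# instead, and configure links with "-framework Python" unless
# --disable-framework was given; lipo is consulted so the module is built
# for the same architectures as the httpd binary located earlier.
# ---------------------------------------------------------------------------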
ac_script=' t clear :clear s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g t quote s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g t quote b any :quote s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g s/\[/\\&/g s/\]/\\&/g s/\$/$$/g H :any ${ g s/^\n// s/\n/ /g p } ' DEFS=`sed -n "$ac_script" confdefs.h` ac_libobjs= ac_ltlibobjs= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : ${CONFIG_STATUS=./config.status} ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 echo "$as_me: creating $CONFIG_STATUS" >&6;} cat >$CONFIG_STATUS <<_ACEOF #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF ## --------------------- ## ## M4sh Initialization. ## ## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # PATH needs CR # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then echo "#! /bin/sh" >conf$$.sh echo "exit 0" >>conf$$.sh chmod +x conf$$.sh if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then PATH_SEPARATOR=';' else PATH_SEPARATOR=: fi rm -f conf$$.sh fi # Support unset when possible. if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then as_unset=unset else as_unset=false fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) as_nl=' ' IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 { (exit 1); exit 1; } fi # Work around bugs in pre-3.0 UWIN ksh. 
for as_var in ENV MAIL MAILPATH do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. for as_var in \ LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \ LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \ LC_TELEPHONE LC_TIME do if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then eval $as_var=C; export $as_var else ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var fi done # Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi # Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # CDPATH. $as_unset CDPATH as_lineno_1=$LINENO as_lineno_2=$LINENO test "x$as_lineno_1" != "x$as_lineno_2" && test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { # Create $as_me.lineno as a copy of $as_myself, but with $LINENO # uniformly replaced by the line number. The first 'sed' inserts a # line-number line after each line using $LINENO; the second 'sed' # does the real work. The second script uses 'N' to pair each # line-number line with the line containing $LINENO, and appends # trailing '-' during substitution so that $LINENO is not a special # case at line end. # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the # scripts with optimization help from Paolo Bonzini. Blame Lee # E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 { (exit 1); exit 1; }; } # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in -n*) case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir fi echo >conf$$.file if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -p'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -p' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -p' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 
2>/dev/null; then as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false fi if test -x / >/dev/null 2>&1; then as_test_x='test -x' else if ls -dL / >/dev/null 2>&1; then as_ls_L_option=L else as_ls_L_option= fi as_test_x=' eval sh -c '\'' if test -d "$1"; then test -d "$1/."; else case $1 in -*)set "./$1";; esac; case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' fi as_executable_p=$as_test_x # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 # Save the log message, to keep $[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.61. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF cat >>$CONFIG_STATUS <<_ACEOF # Files that config.status was made for. config_files="$ac_config_files" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF ac_cs_usage="\ \`$as_me' instantiates files from templates according to the current configuration. Usage: $0 [OPTIONS] [FILE]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit -q, --quiet do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_cs_version="\\ config.status configured by $0, generated by GNU Autoconf 2.61, with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" Copyright (C) 2006 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # If no file are specified by the user, then we need to provide default # value. By we need to know if files were specified by the user. ac_need_defaults=: while test $# != 0 do case $1 in --*=*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) echo "$ac_cs_version"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift CONFIG_FILES="$CONFIG_FILES $ac_optarg" ac_need_defaults=false;; --he | --h | --help | --hel | -h ) echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) { echo "$as_me: error: unrecognized option: $1 Try \`$0 --help' for more information." 
>&2 { (exit 1); exit 1; }; } ;; *) ac_config_targets="$ac_config_targets $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF if \$ac_cs_recheck; then echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6 CONFIG_SHELL=$SHELL export CONFIG_SHELL exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 echo "$as_me: error: invalid argument: $ac_config_target" >&2;} { (exit 1); exit 1; }; };; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || { echo "$me: cannot create a temporary directory in ." >&2 { (exit 1); exit 1; } } # # Set up the sed scripts for CONFIG_FILES section. # # No need to generate the scripts if there are no CONFIG_FILES. 
# This happens for instance when ./config.status config.h if test -n "$CONFIG_FILES"; then _ACEOF ac_delim='%!_!# ' for ac_last_try in false false false false false :; do cat >conf$$subs.sed <<_ACEOF SHELL!$SHELL$ac_delim PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim PACKAGE_NAME!$PACKAGE_NAME$ac_delim PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim PACKAGE_STRING!$PACKAGE_STRING$ac_delim PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim exec_prefix!$exec_prefix$ac_delim prefix!$prefix$ac_delim program_transform_name!$program_transform_name$ac_delim bindir!$bindir$ac_delim sbindir!$sbindir$ac_delim libexecdir!$libexecdir$ac_delim datarootdir!$datarootdir$ac_delim datadir!$datadir$ac_delim sysconfdir!$sysconfdir$ac_delim sharedstatedir!$sharedstatedir$ac_delim localstatedir!$localstatedir$ac_delim includedir!$includedir$ac_delim oldincludedir!$oldincludedir$ac_delim docdir!$docdir$ac_delim infodir!$infodir$ac_delim htmldir!$htmldir$ac_delim dvidir!$dvidir$ac_delim pdfdir!$pdfdir$ac_delim psdir!$psdir$ac_delim libdir!$libdir$ac_delim localedir!$localedir$ac_delim mandir!$mandir$ac_delim DEFS!$DEFS$ac_delim ECHO_C!$ECHO_C$ac_delim ECHO_N!$ECHO_N$ac_delim ECHO_T!$ECHO_T$ac_delim LIBS!$LIBS$ac_delim build_alias!$build_alias$ac_delim host_alias!$host_alias$ac_delim target_alias!$target_alias$ac_delim APXS!$APXS$ac_delim PYTHON!$PYTHON$ac_delim CPPFLAGS!$CPPFLAGS$ac_delim CFLAGS!$CFLAGS$ac_delim LDFLAGS!$LDFLAGS$ac_delim LDLIBS!$LDLIBS$ac_delim LIBEXECDIR!$LIBEXECDIR$ac_delim LIBOBJS!$LIBOBJS$ac_delim LTLIBOBJS!$LTLIBOBJS$ac_delim _ACEOF if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 46; then break elif $ac_last_try; then { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed` if test -n "$ac_eof"; then ac_eof=`echo "$ac_eof" | sort -nru | sed 1q` ac_eof=`expr $ac_eof + 1` fi cat >>$CONFIG_STATUS <<_ACEOF cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof /@[a-zA-Z_][a-zA-Z_0-9]*@/!b end _ACEOF sed ' s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g s/^/s,@/; s/!/@,|#_!!_#|/ :n t n s/'"$ac_delim"'$/,g/; t s/$/\\/; p N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n ' >>$CONFIG_STATUS >$CONFIG_STATUS <<_ACEOF :end s/|#_!!_#|//g CEOF$ac_eof _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), # ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=/{ s/:*\$(srcdir):*/:/ s/:*\${srcdir}:*/:/ s/:*@srcdir@:*/:/ s/^\([^=]*=[ ]*\):*/\1/ s/:*$// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF fi # test -n "$CONFIG_FILES" for ac_tag in :F $CONFIG_FILES do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5 echo "$as_me: error: Invalid tag $ac_tag." >&2;} { (exit 1); exit 1; }; };; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). 
The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 echo "$as_me: error: cannot find input file: $ac_f" >&2;} { (exit 1); exit 1; }; };; esac ac_file_inputs="$ac_file_inputs $ac_f" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input="Generated from "`IFS=: echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure." if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { echo "$as_me:$LINENO: creating $ac_file" >&5 echo "$as_me: creating $ac_file" >&6;} fi case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin";; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` { as_dir="$ac_dir" case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 echo "$as_me: error: cannot create directory $as_dir" >&2;} { (exit 1); exit 1; }; }; } ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
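# A brief note on what the hack below does: extra substitutions are
# appended to the sed script that instantiates each output file, mapping
# the pre-2.60 placeholders straight to their expanded values (for
# example "s&@datadir@&$datadir&g") and rewriting literal uses of
# ${datarootdir}, so templates written against older autoconf releases
# still receive correct installation paths.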
ac_datarootdir_hack=; ac_datarootdir_seen= case `sed -n '/datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p ' $ac_file_inputs` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF sed "$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s&@configure_input@&$configure_input&;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t $ac_datarootdir_hack " $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&2;} rm -f "$tmp/stdin" case $ac_file in -) cat "$tmp/out"; rm -f "$tmp/out";; *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;; esac ;; esac done # for ac_tag { (exit 0); exit 0; } _ACEOF chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. $ac_cs_success || { (exit 1); exit 1; } fi mod_wsgi-3.4/configure.ac000644 000765 000024 00000013376 12015317464 016075 0ustar00grahamstaff000000 000000 dnl vim: set sw=4 expandtab : dnl dnl Copyright 2007-2009 GRAHAM DUMPLETON dnl dnl Licensed under the Apache License, Version 2.0 (the "License"); dnl you may not use this file except in compliance with the License. 
dnl You may obtain a copy of the License at dnl dnl http://www.apache.org/licenses/LICENSE-2.0 dnl dnl Unless required by applicable law or agreed to in writing, software dnl distributed under the License is distributed on an "AS IS" BASIS, dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. dnl See the License for the specific language governing permissions and dnl limitations under the License. dnl Process this file with autoconf to produce a configure script. AC_INIT(mod_wsgi.c) AC_ARG_ENABLE(framework, AC_HELP_STRING([--disable-framework], [disable mod_wsgi framework link]), [ENABLE_FRAMEWORK=$enableval], [ENABLE_FRAMEWORK=yes]) AC_ARG_ENABLE(embedded, AC_HELP_STRING([--disable-embedded], [disable mod_wsgi embedded mode]), [ENABLE_EMBEDDED=$enableval], [ENABLE_EMBEDDED=yes]) AC_ARG_WITH(apxs, AC_HELP_STRING([--with-apxs=NAME], [name of the apxs executable [[apxs]]]), [APXS="$with_apxs"]) if test -z "${APXS}"; then AC_PATH_PROGS(APXS, apxs2 apxs, [apxs], [$PATH:/usr/local/apache/bin:/usr/sbin]) fi AC_SUBST(APXS) AC_MSG_CHECKING(Apache version) HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`" if test -x ${HTTPD}; then HTTPD_VERSION=`${HTTPD} -v | awk '/version/ {print $3}' \ | awk -F/ '{print $2}'` else if test -f ${HTTPD_INCLUDEDIR}/ap_release.h; then HTTPD_VERSION=`grep '^#define AP_SERVER_MAJORVERSION_NUMBER ' \ ${HTTPD_INCLUDEDIR}/ap_release.h | sed -e \ 's/^#define AP_SERVER_MAJORVERSION_NUMBER \([0-9]\).*/\1.X/'` else if test -f ${HTTPD_INCLUDEDIR}/httpd.h; then HTTPD_VERSION=`grep '^#define APACHE_RELEASE ' \ ${HTTPD_INCLUDEDIR}/httpd.h | sed -e \ 's/^#define APACHE_RELEASE \([0-9]\).*/\1.X/'` else HTTPD_VERSION="2.?" fi fi fi AC_MSG_RESULT($HTTPD_VERSION) AC_ARG_WITH(python, AC_HELP_STRING([--with-python=NAME], [name of the python executable [[python]]]), [PYTHON="$with_python"]) if test -z "${PYTHON}"; then AC_PATH_PROGS(PYTHON, python, [python], [$PATH:/usr/local/bin]) fi AC_SUBST(PYTHON) PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write((sysconfig.get_config_var("VERSION")))'` CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write("-I" + sysconfig.get_config_var("INCLUDEPY"))'` CPPFLAGS2=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(" ".join(filter(lambda x: x.startswith("-D"), \ sysconfig.get_config_var("CFLAGS").split())))'` if test "${ENABLE_EMBEDDED}" != "yes"; then CPPFLAGS3="-DMOD_WSGI_DISABLE_EMBEDDED" else CPPFLAGS3="" fi CPPFLAGS="${CPPFLAGS} ${CPPFLAGS1} ${CPPFLAGS2} ${CPPFLAGS3}" AC_SUBST(CPPFLAGS) PYTHONLIBDIR=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBDIR"))'` PYTHONCFGDIR=`${PYTHON} -c 'from sys import stdout; \ import distutils.sysconfig; \ stdout.write(distutils.sysconfig.get_python_lib(plat_specific=1, \ standard_lib=1) +"/config")'` PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` PYTHONFRAMEWORKPREFIX=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \ "${ENABLE_FRAMEWORK}" != 
"yes"; then LDFLAGS1="-L${PYTHONLIBDIR}" LDFLAGS2="-L${PYTHONCFGDIR}" LDLIBS1="-lpython${PYTHON_VERSION}" LDLIBS2=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` LDLIBS3=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("SYSLIBS"))'` else LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" VERSION="${PYTHON_VERSION}" STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}" LDFLAGS2=`${PYTHON} -c "from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var( \"LINKFORSHARED\").replace(\"${STRING}\", ''))"` LDLIBS1=`${PYTHON} -c 'from sys import stdout; \ from distutils import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` fi CFLAGS1="" for arg in ${CFLAGS} do CFLAGS1="${CFLAGS1} -Wc,$arg" done CFLAGS2="" if test -x /usr/bin/lipo; then LDFLAGS3="" ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` for ARCH in $ARCHITECTURES; do CFLAGS2="${CFLAGS2} -Wc,'-arch ${ARCH}'" LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" done fi CFLAGS="${CFLAGS1} ${CFLAGS2}" LDFLAGS="${LDFLAGS} ${LDFLAGS1} ${LDFLAGS2} ${LDFLAGS3}" LDLIBS="${LDLIBS} ${LDLIBS1} ${LDLIBS2} ${LDLIBS3}" AC_SUBST(CFLAGS) AC_SUBST(LDFLAGS) AC_SUBST(LDLIBS) LIBEXECDIR="`${APXS} -q LIBEXECDIR`" AC_SUBST(LIBEXECDIR) HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'` rm -f Makefile.in ln -s posix-ap${HTTPD_MAJOR_VERSION}X.mk.in Makefile.in AC_OUTPUT(Makefile) mod_wsgi-3.4/LICENCE000644 000765 000024 00000026136 12015317464 014572 0ustar00grahamstaff000000 000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mod_wsgi-3.4/mod_wsgi.c000644 000765 000024 00001663110 12015317464 015561 0ustar00grahamstaff000000 000000 /* vim: set sw=4 expandtab : */ /* * Copyright 2007-2012 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * Enabled access to Apache private API and data structures. Need to do * this to access the following: * * In Apache 1.3 it is not possible to access ap_check_cmd_context() * where as this was made public in Apache 2.0. * * In Apache 2.X need access to ap_create_request_config(). * * In Apache 2.X need access to core_module and core_request_config. 
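 * Note that CORE_PRIVATE has to be defined before the first Apache
 * header is pulled in, since it controls which declarations those
 * headers expose; that is why the define immediately precedes the
 * include of "httpd.h" below.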
* */ #define CORE_PRIVATE 1 #include "httpd.h" #if !defined(HTTPD_ROOT) #error Sorry, Apache developer package does not appear to be installed. #endif #if !defined(AP_SERVER_MAJORVERSION_NUMBER) #if AP_MODULE_MAGIC_AT_LEAST(20010224,0) #define AP_SERVER_MAJORVERSION_NUMBER 2 #define AP_SERVER_MINORVERSION_NUMBER 0 #define AP_SERVER_PATCHLEVEL_NUMBER 0 #else #define AP_SERVER_MAJORVERSION_NUMBER 1 #define AP_SERVER_MINORVERSION_NUMBER 3 #define AP_SERVER_PATCHLEVEL_NUMBER 0 #endif #endif #if !defined(AP_SERVER_BASEVERSION) #define AP_SERVER_BASEVERSION SERVER_BASEVERSION #endif #if AP_SERVER_MAJORVERSION_NUMBER < 2 typedef int apr_status_t; #define APR_SUCCESS 0 typedef pool apr_pool_t; typedef unsigned int apr_port_t; #include "ap_ctype.h" #include "ap_alloc.h" #define apr_isspace ap_isspace #define apr_table_make ap_make_table #define apr_table_get ap_table_get #define apr_table_set ap_table_set #define apr_table_setn ap_table_setn #define apr_table_add ap_table_add #define apr_table_elts ap_table_elts #define apr_array_make ap_make_array #define apr_array_push ap_push_array #define apr_array_cat ap_array_cat #define apr_array_append ap_append_arrays typedef array_header apr_array_header_t; typedef table apr_table_t; typedef table_entry apr_table_entry_t; typedef int apr_size_t; typedef unsigned long apr_off_t; #define apr_psprintf ap_psprintf #define apr_pstrndup ap_pstrndup #define apr_pstrdup ap_pstrdup #define apr_pstrcat ap_pstrcat #define apr_pcalloc ap_pcalloc #define apr_palloc ap_palloc #define apr_isalnum isalnum #define apr_toupper toupper typedef time_t apr_time_t; #include "http_config.h" typedef int apr_lockmech_e; #else #include "apr_lib.h" #include "ap_mpm.h" #include "ap_compat.h" #include "apr_tables.h" #include "apr_strings.h" #include "http_config.h" #include "ap_listen.h" #include "apr_version.h" #include "apr_optional.h" APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *)); APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, (apr_pool_t *, server_rec *, conn_rec *, request_rec *, char *)); #endif #include "ap_config.h" #include "http_core.h" #include "http_log.h" #include "http_main.h" #include "http_protocol.h" #include "http_request.h" #include "util_script.h" #include "util_md5.h" #ifndef APR_FPROT_GWRITE #define APR_FPROT_GWRITE APR_GWRITE #endif #ifndef APR_FPROT_WWRITE #define APR_FPROT_WWRITE APR_WWRITE #endif #if !AP_MODULE_MAGIC_AT_LEAST(20050127,0) /* Debian backported ap_regex_t to Apache 2.0 and * thus made official version checking break. */ #ifndef AP_REG_EXTENDED typedef regex_t ap_regex_t; typedef regmatch_t ap_regmatch_t; #define AP_REG_EXTENDED REG_EXTENDED #endif #endif #if !AP_MODULE_MAGIC_AT_LEAST(20081201,0) #define ap_unixd_config unixd_config #endif #ifndef WIN32 #include #endif #include "Python.h" #if !defined(PY_VERSION_HEX) #error Sorry, Python developer package does not appear to be installed. #endif #if PY_VERSION_HEX <= 0x02030000 #error Sorry, mod_wsgi requires at least Python 2.3.0 for Python 2.X. #endif #if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000 #error Sorry, mod_wsgi requires at least Python 3.1.0 for Python 3.X. #endif #if !defined(WITH_THREAD) #error Sorry, mod_wsgi requires that Python supporting thread. 
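/*
 * A little further below, when building against Python 3, the
 * PyString_* names are remapped onto the PyBytes_* API so that code
 * written in terms of Python 2 byte strings compiles unchanged; for
 * example, a call such as PyString_FromString("abc") expands to
 * PyBytes_FromString("abc") after preprocessing.
 */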
#endif #include "compile.h" #include "node.h" #include "osdefs.h" #ifndef PyVarObject_HEAD_INIT #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #endif #if PY_MAJOR_VERSION >= 3 #define PyStringObject PyBytesObject #define PyString_Check PyBytes_Check #define PyString_Size PyBytes_Size #define PyString_AsString PyBytes_AsString #define PyString_FromString PyBytes_FromString #define PyString_FromStringAndSize PyBytes_FromStringAndSize #define PyString_AS_STRING PyBytes_AS_STRING #define PyString_GET_SIZE PyBytes_GET_SIZE #define _PyString_Resize _PyBytes_Resize #endif #ifndef WIN32 #if AP_SERVER_MAJORVERSION_NUMBER >= 2 #if APR_HAS_OTHER_CHILD && APR_HAS_THREADS && APR_HAS_FORK #define MOD_WSGI_WITH_DAEMONS 1 #endif #endif #endif #if AP_SERVER_MAJORVERSION_NUMBER >= 2 #define MOD_WSGI_WITH_BUCKETS 1 #define MOD_WSGI_WITH_AAA_HANDLERS 1 #endif #if defined(MOD_WSGI_WITH_AAA_HANDLERS) static PyTypeObject Auth_Type; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 #if AP_SERVER_MINORVERSION_NUMBER >= 2 #define MOD_WSGI_WITH_AUTHN_PROVIDER 1 #endif #endif #if AP_MODULE_MAGIC_AT_LEAST(20060110,0) #define MOD_WSGI_WITH_AUTHZ_PROVIDER 1 #if AP_MODULE_MAGIC_AT_LEAST(20100919,0) #define MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED 1 #endif #endif #endif #if defined(MOD_WSGI_WITH_AUTHN_PROVIDER) #include "mod_auth.h" #include "ap_provider.h" #ifndef AUTHN_PROVIDER_VERSION #define AUTHN_PROVIDER_VERSION "0" #endif #endif #if defined(MOD_WSGI_WITH_DAEMONS) #if !AP_MODULE_MAGIC_AT_LEAST(20051115,0) static void ap_close_listeners(void) { ap_listen_rec *lr; for (lr = ap_listeners; lr; lr = lr->next) { apr_socket_close(lr->sd); lr->active = 0; } } #endif #if (APR_MAJOR_VERSION == 0) && \ (APR_MINOR_VERSION == 9) && \ (APR_PATCH_VERSION < 5) static apr_status_t apr_unix_file_cleanup(void *thefile) { apr_file_t *file = thefile; return apr_file_close(file); } static apr_status_t apr_os_pipe_put_ex(apr_file_t **file, apr_os_file_t *thefile, int register_cleanup, apr_pool_t *pool) { apr_status_t rv; rv = apr_os_pipe_put(file, thefile, pool); if (register_cleanup) { apr_pool_cleanup_register(pool, (void *)(*file), apr_unix_file_cleanup, apr_pool_cleanup_null); } return rv; } #endif #endif #if AP_SERVER_MAJORVERSION_NUMBER < 2 static char *apr_off_t_toa(apr_pool_t *p, apr_off_t n) { const int BUFFER_SIZE = sizeof(apr_off_t) * 3 + 2; char *buf = apr_palloc(p, BUFFER_SIZE); char *start = buf + BUFFER_SIZE - 1; int negative; if (n < 0) { negative = 1; n = -n; } else { negative = 0; } *start = 0; do { *--start = '0' + (char)(n % 10); n /= 10; } while (n); if (negative) { *--start = '-'; } return start; } #endif #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) typedef apr_uint16_t apr_wchar_t; APR_DECLARE(apr_status_t) apr_conv_utf8_to_ucs2(const char *in, apr_size_t *inbytes, apr_wchar_t *out, apr_size_t *outwords); static apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr, apr_size_t retlen, const char* srcstr) { /* TODO: The computations could preconvert the string to determine * the true size of the retstr, but that's a memory over speed * tradeoff that isn't appropriate this early in development. * * Allocate the maximum string length based on leading 4 * characters of \\?\ (allowing nearly unlimited path lengths) * plus the trailing null, then transform /'s into \\'s since * the \\?\ form doesn't allow '/' path seperators. * * Note that the \\?\ form only works for local drive paths, and * \\?\UNC\ is needed UNC paths. 
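 * As an illustration (hypothetical paths): a drive path longer than
 * 248 characters, say "c:/very/long/.../app.wsgi", comes out as
 * "\\?\c:\very\long\...\app.wsgi", while a comparably long UNC path
 * "//fileserver/share/.../app.wsgi" comes out as
 * "\\?\UNC\fileserver\share\...\app.wsgi". Shorter names are only
 * converted to wide characters and have their '/' separators flipped
 * to '\', without gaining the \\?\ prefix.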
*/ apr_size_t srcremains = strlen(srcstr) + 1; apr_wchar_t *t = retstr; apr_status_t rv; /* This is correct, we don't twist the filename if it is will * definately be shorter than 248 characters. It merits some * performance testing to see if this has any effect, but there * seem to be applications that get confused by the resulting * Unicode \\?\ style file names, especially if they use argv[0] * or call the Win32 API functions such as GetModuleName, etc. * Not every application is prepared to handle such names. * * Note also this is shorter than MAX_PATH, as directory paths * are actually limited to 248 characters. * * Note that a utf-8 name can never result in more wide chars * than the original number of utf-8 narrow chars. */ if (srcremains > 248) { if (srcstr[1] == ':' && (srcstr[2] == '/' || srcstr[2] == '\\')) { wcscpy (retstr, L"\\\\?\\"); retlen -= 4; t += 4; } else if ((srcstr[0] == '/' || srcstr[0] == '\\') && (srcstr[1] == '/' || srcstr[1] == '\\') && (srcstr[2] != '?')) { /* Skip the slashes */ srcstr += 2; srcremains -= 2; wcscpy (retstr, L"\\\\?\\UNC\\"); retlen -= 8; t += 8; } } if (rv = apr_conv_utf8_to_ucs2(srcstr, &srcremains, t, &retlen)) { return (rv == APR_INCOMPLETE) ? APR_EINVAL : rv; } if (srcremains) { return APR_ENAMETOOLONG; } for (; *t; ++t) if (*t == L'/') *t = L'\\'; return APR_SUCCESS; } #endif /* Compatibility macros for log level and status. */ #if AP_SERVER_MAJORVERSION_NUMBER < 2 #define WSGI_LOG_LEVEL(l) l #define WSGI_LOG_LEVEL_AND_STATUS(l, e) l | (!e ? APLOG_NOERRNO : 0) #else #define WSGI_LOG_LEVEL(l) l, 0 #define WSGI_LOG_LEVEL_AND_STATUS(l, e) l, e #endif #define WSGI_LOG_EMERG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_EMERG, e) #define WSGI_LOG_ALERT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ALERT, e) #define WSGI_LOG_CRIT(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_CRIT, e) #define WSGI_LOG_ERR(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_ERR, e) #define WSGI_LOG_WARNING(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_WARNING, e) #define WSGI_LOG_NOTICE(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_NOTICE, e) #define WSGI_LOG_INFO(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_INFO, e) #define WSGI_LOG_DEBUG(e) WSGI_LOG_LEVEL_AND_STATUS(APLOG_DEBUG, e) /* Version and module information. */ #define MOD_WSGI_MAJORVERSION_NUMBER 3 #define MOD_WSGI_MINORVERSION_NUMBER 4 #define MOD_WSGI_VERSION_STRING "3.4" #if AP_SERVER_MAJORVERSION_NUMBER < 2 module MODULE_VAR_EXPORT wsgi_module; #else module AP_MODULE_DECLARE_DATA wsgi_module; #endif /* Constants. */ #define WSGI_RELOAD_MODULE 0 #define WSGI_RELOAD_PROCESS 1 /* Python interpreter state. */ static PyThreadState *wsgi_main_tstate = NULL; /* Base server object. */ static server_rec *wsgi_server = NULL; /* Process information. */ static pid_t wsgi_parent_pid = 0; static int wsgi_multiprocess = 1; static int wsgi_multithread = 1; /* Daemon information. */ static const char *wsgi_daemon_group = ""; static apr_array_header_t *wsgi_daemon_list = NULL; static apr_pool_t *wsgi_parent_pool = NULL; static apr_pool_t *wsgi_daemon_pool = NULL; static int volatile wsgi_daemon_shutdown = 0; #if defined(MOD_WSGI_WITH_DAEMONS) static apr_interval_time_t wsgi_deadlock_timeout = 0; static apr_interval_time_t wsgi_inactivity_timeout = 0; static apr_time_t volatile wsgi_deadlock_shutdown_time = 0; static apr_time_t volatile wsgi_inactivity_shutdown_time = 0; static apr_thread_mutex_t* wsgi_shutdown_lock = NULL; #endif /* Script information. */ static apr_array_header_t *wsgi_import_list = NULL; /* Configuration objects. 
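 * Both the per-server and per-directory structures use NULL pointers
 * and -1 integer values to mean "not set at this level"; the merge
 * functions below take each field from the more specific configuration
 * when it is set and otherwise fall back to the parent, following
 * Apache's usual create/merge configuration pattern.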
*/ typedef struct { const char *location; const char *application; ap_regex_t *regexp; const char *process_group; const char *application_group; const char *callable_object; int pass_authorization; } WSGIAliasEntry; typedef struct { const char *handler_script; const char *process_group; const char *application_group; const char *callable_object; const char *pass_authorization; } WSGIScriptFile; typedef struct { apr_pool_t *pool; apr_array_header_t *alias_list; const char *socket_prefix; apr_lockmech_e lock_mechanism; int verbose_debugging; apr_array_header_t *python_warnings; int python_optimize; int py3k_warning_flag; int dont_write_bytecode; const char *lang; const char *locale; const char *python_home; const char *python_path; const char *python_eggs; int restrict_embedded; int restrict_stdin; int restrict_stdout; int restrict_signal; int case_sensitivity; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int enable_sendfile; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 apr_hash_t *handler_scripts; #endif } WSGIServerConfig; static WSGIServerConfig *wsgi_server_config = NULL; static WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p) { WSGIScriptFile *object = NULL; object = (WSGIScriptFile *)apr_pcalloc(p, sizeof(WSGIScriptFile)); object->handler_script = NULL; object->application_group = NULL; object->process_group = NULL; return object; } static WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p) { WSGIServerConfig *object = NULL; object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig)); object->pool = p; object->alias_list = NULL; object->socket_prefix = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi"; object->socket_prefix = ap_server_root_relative(p, object->socket_prefix); #endif object->verbose_debugging = 0; object->python_warnings = NULL; object->py3k_warning_flag = -1; object->python_optimize = -1; object->dont_write_bytecode = -1; object->lang = NULL; object->locale = NULL; object->python_home = NULL; object->python_path = NULL; object->python_eggs = NULL; object->restrict_embedded = -1; object->restrict_stdin = -1; object->restrict_stdout = -1; object->restrict_signal = -1; #if defined(WIN32) || defined(DARWIN) object->case_sensitivity = 0; #else object->case_sensitivity = 1; #endif object->restrict_process = NULL; object->process_group = NULL; object->application_group = NULL; object->callable_object = NULL; object->dispatch_script = NULL; object->pass_apache_request = -1; object->pass_authorization = -1; object->script_reloading = -1; object->error_override = -1; object->chunked_request = -1; object->enable_sendfile = -1; return object; } static void *wsgi_create_server_config(apr_pool_t *p, server_rec *s) { WSGIServerConfig *config = NULL; config = newWSGIServerConfig(p); return config; } static void *wsgi_merge_server_config(apr_pool_t *p, void *base_conf, void *new_conf) { WSGIServerConfig *config = NULL; WSGIServerConfig *parent = NULL; WSGIServerConfig *child = NULL; config = newWSGIServerConfig(p); parent = (WSGIServerConfig *)base_conf; child = (WSGIServerConfig *)new_conf; if (child->alias_list && parent->alias_list) { config->alias_list = apr_array_append(p, child->alias_list, parent->alias_list); } else if (child->alias_list) { config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry)); 
apr_array_cat(config->alias_list, child->alias_list); } else if (parent->alias_list) { config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry)); apr_array_cat(config->alias_list, parent->alias_list); } if (child->restrict_process) config->restrict_process = child->restrict_process; else config->restrict_process = parent->restrict_process; if (child->process_group) config->process_group = child->process_group; else config->process_group = parent->process_group; if (child->application_group) config->application_group = child->application_group; else config->application_group = parent->application_group; if (child->callable_object) config->callable_object = child->callable_object; else config->callable_object = parent->callable_object; if (child->dispatch_script) config->dispatch_script = child->dispatch_script; else config->dispatch_script = parent->dispatch_script; if (child->pass_apache_request != -1) config->pass_apache_request = child->pass_apache_request; else config->pass_apache_request = parent->pass_apache_request; if (child->pass_authorization != -1) config->pass_authorization = child->pass_authorization; else config->pass_authorization = parent->pass_authorization; if (child->script_reloading != -1) config->script_reloading = child->script_reloading; else config->script_reloading = parent->script_reloading; if (child->error_override != -1) config->error_override = child->error_override; else config->error_override = parent->error_override; if (child->chunked_request != -1) config->chunked_request = child->chunked_request; else config->chunked_request = parent->chunked_request; if (child->enable_sendfile != -1) config->enable_sendfile = child->enable_sendfile; else config->enable_sendfile = parent->enable_sendfile; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (!child->handler_scripts) config->handler_scripts = parent->handler_scripts; else if (!parent->handler_scripts) config->handler_scripts = child->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, child->handler_scripts, parent->handler_scripts); } #endif return config; } typedef struct { apr_pool_t *pool; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int enable_sendfile; WSGIScriptFile *access_script; WSGIScriptFile *auth_user_script; WSGIScriptFile *auth_group_script; int user_authoritative; int group_authoritative; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 apr_hash_t *handler_scripts; #endif } WSGIDirectoryConfig; static WSGIDirectoryConfig *newWSGIDirectoryConfig(apr_pool_t *p) { WSGIDirectoryConfig *object = NULL; object = (WSGIDirectoryConfig *)apr_pcalloc(p, sizeof(WSGIDirectoryConfig)); object->pool = p; object->process_group = NULL; object->application_group = NULL; object->callable_object = NULL; object->dispatch_script = NULL; object->pass_apache_request = -1; object->pass_authorization = -1; object->script_reloading = -1; object->error_override = -1; object->chunked_request = -1; object->enable_sendfile = -1; object->access_script = NULL; object->auth_user_script = NULL; object->auth_group_script = NULL; object->user_authoritative = -1; object->group_authoritative = -1; return object; } static void *wsgi_create_dir_config(apr_pool_t *p, char *dir) { WSGIDirectoryConfig *config = NULL; config = newWSGIDirectoryConfig(p); return config; } static void *wsgi_merge_dir_config(apr_pool_t 
*p, void *base_conf, void *new_conf) { WSGIDirectoryConfig *config = NULL; WSGIDirectoryConfig *parent = NULL; WSGIDirectoryConfig *child = NULL; config = newWSGIDirectoryConfig(p); parent = (WSGIDirectoryConfig *)base_conf; child = (WSGIDirectoryConfig *)new_conf; if (child->restrict_process) config->restrict_process = child->restrict_process; else config->restrict_process = parent->restrict_process; if (child->process_group) config->process_group = child->process_group; else config->process_group = parent->process_group; if (child->application_group) config->application_group = child->application_group; else config->application_group = parent->application_group; if (child->callable_object) config->callable_object = child->callable_object; else config->callable_object = parent->callable_object; if (child->dispatch_script) config->dispatch_script = child->dispatch_script; else config->dispatch_script = parent->dispatch_script; if (child->pass_apache_request != -1) config->pass_apache_request = child->pass_apache_request; else config->pass_apache_request = parent->pass_apache_request; if (child->pass_authorization != -1) config->pass_authorization = child->pass_authorization; else config->pass_authorization = parent->pass_authorization; if (child->script_reloading != -1) config->script_reloading = child->script_reloading; else config->script_reloading = parent->script_reloading; if (child->error_override != -1) config->error_override = child->error_override; else config->error_override = parent->error_override; if (child->chunked_request != -1) config->chunked_request = child->chunked_request; else config->chunked_request = parent->chunked_request; if (child->enable_sendfile != -1) config->enable_sendfile = child->enable_sendfile; else config->enable_sendfile = parent->enable_sendfile; if (child->access_script) config->access_script = child->access_script; else config->access_script = parent->access_script; if (child->auth_user_script) config->auth_user_script = child->auth_user_script; else config->auth_user_script = parent->auth_user_script; if (child->auth_group_script) config->auth_group_script = child->auth_group_script; else config->auth_group_script = parent->auth_group_script; if (child->user_authoritative != -1) config->user_authoritative = child->user_authoritative; else config->user_authoritative = parent->user_authoritative; if (child->group_authoritative != -1) config->group_authoritative = child->group_authoritative; else config->group_authoritative = parent->group_authoritative; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (!child->handler_scripts) config->handler_scripts = parent->handler_scripts; else if (!parent->handler_scripts) config->handler_scripts = child->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, child->handler_scripts, parent->handler_scripts); } #endif return config; } typedef struct { apr_pool_t *pool; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int enable_sendfile; WSGIScriptFile *access_script; WSGIScriptFile *auth_user_script; WSGIScriptFile *auth_group_script; int user_authoritative; int group_authoritative; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 apr_hash_t *handler_scripts; #endif const char *handler_script; } WSGIRequestConfig; static int wsgi_find_path_info(const char *uri, const char *path_info) { int lu = 
strlen(uri); int lp = strlen(path_info); while (lu-- && lp-- && uri[lu] == path_info[lp]) { if (path_info[lp] == '/') { while (lu && uri[lu-1] == '/') lu--; } } if (lu == -1) { lu = 0; } while (uri[lu] != '\0' && uri[lu] != '/') { lu++; } return lu; } static const char *wsgi_script_name(request_rec *r) { char *script_name = NULL; int path_info_start = 0; if (!r->path_info || !*r->path_info) { script_name = apr_pstrdup(r->pool, r->uri); } else { path_info_start = wsgi_find_path_info(r->uri, r->path_info); script_name = apr_pstrndup(r->pool, r->uri, path_info_start); } if (*script_name) { while (*script_name && (*(script_name+1) == '/')) script_name++; script_name = apr_pstrdup(r->pool, script_name); ap_no2slash((char*)script_name); } ap_str_tolower(script_name); return script_name; } static const char *wsgi_process_group(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; if (!s) return ""; if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{GLOBAL}")) return ""; if (strstr(name, "{ENV:") == name) { int len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) { if (*value == '%' && strstr(value, "%{ENV:") != value) return wsgi_process_group(r, value); return value; } } } } return s; } static const char *wsgi_server_group(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; const char *h = NULL; apr_port_t p = 0; if (!s) return ""; if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{SERVER}")) { h = r->server->server_hostname; p = ap_get_server_port(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (!strcmp(name, "{GLOBAL}")) return ""; } return s; } static const char *wsgi_application_group(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; const char *h = NULL; apr_port_t p = 0; const char *n = NULL; if (!s) { h = r->server->server_hostname; p = ap_get_server_port(r); n = wsgi_script_name(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u|%s", h, p, n); else return apr_psprintf(r->pool, "%s|%s", h, n); } if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{RESOURCE}")) { h = r->server->server_hostname; p = ap_get_server_port(r); n = wsgi_script_name(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u|%s", h, p, n); else return apr_psprintf(r->pool, "%s|%s", h, n); } if (!strcmp(name, "{SERVER}")) { h = r->server->server_hostname; p = ap_get_server_port(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (!strcmp(name, "{GLOBAL}")) return ""; if (strstr(name, "{ENV:") == name) { int len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) { if (*value == '%' && strstr(value, "%{ENV:") != value) return wsgi_application_group(r, value); return value; } } } } return s; } static const char *wsgi_callable_object(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; if (!s) 
return "application"; if (*s != '%') return s; name = s + 1; if (!*name) return "application"; if (strstr(name, "{ENV:") == name) { int len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) return value; } } return "application"; } static WSGIRequestConfig *wsgi_create_req_config(apr_pool_t *p, request_rec *r) { WSGIRequestConfig *config = NULL; WSGIServerConfig *sconfig = NULL; WSGIDirectoryConfig *dconfig = NULL; config = (WSGIRequestConfig *)apr_pcalloc(p, sizeof(WSGIRequestConfig)); dconfig = ap_get_module_config(r->per_dir_config, &wsgi_module); sconfig = ap_get_module_config(r->server->module_config, &wsgi_module); config->pool = p; config->restrict_process = dconfig->restrict_process; if (!config->restrict_process) config->restrict_process = sconfig->restrict_process; config->process_group = dconfig->process_group; if (!config->process_group) config->process_group = sconfig->process_group; config->process_group = wsgi_process_group(r, config->process_group); config->application_group = dconfig->application_group; if (!config->application_group) config->application_group = sconfig->application_group; config->application_group = wsgi_application_group(r, config->application_group); config->callable_object = dconfig->callable_object; if (!config->callable_object) config->callable_object = sconfig->callable_object; config->callable_object = wsgi_callable_object(r, config->callable_object); config->dispatch_script = dconfig->dispatch_script; if (!config->dispatch_script) config->dispatch_script = sconfig->dispatch_script; config->pass_apache_request = dconfig->pass_apache_request; if (config->pass_apache_request < 0) { config->pass_apache_request = sconfig->pass_apache_request; if (config->pass_apache_request < 0) config->pass_apache_request = 0; } config->pass_authorization = dconfig->pass_authorization; if (config->pass_authorization < 0) { config->pass_authorization = sconfig->pass_authorization; if (config->pass_authorization < 0) config->pass_authorization = 0; } config->script_reloading = dconfig->script_reloading; if (config->script_reloading < 0) { config->script_reloading = sconfig->script_reloading; if (config->script_reloading < 0) config->script_reloading = 1; } config->error_override = dconfig->error_override; if (config->error_override < 0) { config->error_override = sconfig->error_override; if (config->error_override < 0) config->error_override = 0; } config->chunked_request = dconfig->chunked_request; if (config->chunked_request < 0) { config->chunked_request = sconfig->chunked_request; if (config->chunked_request < 0) config->chunked_request = 0; } config->enable_sendfile = dconfig->enable_sendfile; if (config->enable_sendfile < 0) { config->enable_sendfile = sconfig->enable_sendfile; if (config->enable_sendfile < 0) config->enable_sendfile = 0; } config->access_script = dconfig->access_script; config->auth_user_script = dconfig->auth_user_script; config->auth_group_script = dconfig->auth_group_script; config->user_authoritative = dconfig->user_authoritative; if (config->user_authoritative == -1) config->user_authoritative = 1; config->group_authoritative = dconfig->group_authoritative; if (config->group_authoritative == -1) config->group_authoritative = 1; #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (!dconfig->handler_scripts) config->handler_scripts = 
sconfig->handler_scripts; else if (!sconfig->handler_scripts) config->handler_scripts = dconfig->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, dconfig->handler_scripts, sconfig->handler_scripts); } #endif config->handler_script = ""; return config; } /* * Apache 2.X and UNIX specific definitions related to * distinct daemon processes. */ #if defined(MOD_WSGI_WITH_DAEMONS) #include "unixd.h" #include "scoreboard.h" #include "mpm_common.h" #include "apr_proc_mutex.h" #include "apr_thread_cond.h" #include "apr_atomic.h" #include "http_connection.h" #include "apr_buckets.h" #include "apr_poll.h" #include "apr_signal.h" #include "http_vhost.h" #if APR_MAJOR_VERSION < 1 #define apr_atomic_cas32 apr_atomic_cas #endif #if APR_HAVE_SYS_SOCKET_H #include #endif #if APR_HAVE_UNISTD_H #include #endif #if APR_HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_SEM_H #include #endif #include #include #ifndef WSGI_LISTEN_BACKLOG #define WSGI_LISTEN_BACKLOG 100 #endif #ifndef WSGI_CONNECT_ATTEMPTS #define WSGI_CONNECT_ATTEMPTS 15 #endif #define WSGI_STACK_HEAD 0xffff #define WSGI_STACK_LAST 0xffff #define WSGI_STACK_TERMINATED 0x10000 #define WSGI_STACK_NO_LISTENER 0x20000 typedef struct { server_rec *server; long random; int id; const char *name; const char *user; uid_t uid; const char *group; gid_t gid; const char *groups_list; int groups_count; gid_t *groups; int processes; int multiprocess; int threads; int umask; const char *root; const char *home; const char *lang; const char *locale; const char *python_home; const char *python_path; const char *python_eggs; int stack_size; int maximum_requests; int shutdown_timeout; apr_time_t deadlock_timeout; apr_time_t inactivity_timeout; const char *display_name; int send_buffer_size; int recv_buffer_size; const char *script_user; const char *script_group; int cpu_time_limit; int cpu_priority; rlim_t memory_limit; rlim_t virtual_memory_limit; const char *socket; int listener_fd; const char* mutex_path; apr_proc_mutex_t* mutex; } WSGIProcessGroup; typedef struct { WSGIProcessGroup *group; int instance; apr_proc_t process; apr_socket_t *listener; } WSGIDaemonProcess; typedef struct { int id; WSGIDaemonProcess *process; apr_thread_t *thread; int running; int next; int wakeup; apr_thread_cond_t *condition; apr_thread_mutex_t *mutex; } WSGIDaemonThread; typedef struct { apr_uint32_t state; } WSGIThreadStack; typedef struct { const char *name; const char *socket; int fd; } WSGIDaemonSocket; static int wsgi_daemon_count = 0; static apr_hash_t *wsgi_daemon_index = NULL; static apr_hash_t *wsgi_daemon_listeners = NULL; static WSGIDaemonProcess *wsgi_daemon_process = NULL; static int volatile wsgi_request_count = 0; static WSGIDaemonThread *wsgi_worker_threads = NULL; static WSGIThreadStack *wsgi_worker_stack = NULL; #endif /* Class objects used by response handler. 
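 *
 * The types defined below implement the objects handed to the
 * WSGI application: LogObject backs 'wsgi.errors', InputObject
 * backs 'wsgi.input', AdapterObject drives the application
 * callable and supplies start_response()/write(), and
 * StreamObject backs 'wsgi.file_wrapper'. For illustration only
 * (not part of this module), the kind of callable these objects
 * end up driving, using the default callable name returned by
 * wsgi_callable_object(), looks like:
 *
 *     def application(environ, start_response):
 *         start_response('200 OK', [('Content-Type', 'text/plain')])
 *         return [b'Hello world!']
 *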
*/ static PyTypeObject Dispatch_Type; typedef struct { PyObject_HEAD const char *target; request_rec *r; int level; char *s; int l; int expired; #if PY_MAJOR_VERSION < 3 int softspace; #endif } LogObject; static PyTypeObject Log_Type; static PyObject *newLogObject(request_rec *r, int level, const char *target) { LogObject *self; #if PY_MAJOR_VERSION >= 3 PyObject *module = NULL; PyObject *dict = NULL; PyObject *object = NULL; PyObject *args = NULL; PyObject *result = NULL; module = PyImport_ImportModule("io"); if (!module) return NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "TextIOWrapper"); if (!object) { PyErr_SetString(PyExc_NameError, "name 'TextIOWrapper' is not defined"); return NULL; } #endif self = PyObject_New(LogObject, &Log_Type); if (self == NULL) return NULL; self->target = target; self->r = r; self->level = APLOG_NOERRNO|level; self->s = NULL; self->l = 0; self->expired = 0; #if PY_MAJOR_VERSION < 3 self->softspace = 0; #endif #if PY_MAJOR_VERSION >= 3 Py_INCREF(object); args = Py_BuildValue("(OssOO)", self, "utf-8", "replace", Py_None, Py_True); Py_DECREF(self); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); return result; #else return (PyObject *)self; #endif } #if 0 static void Log_file(LogObject *self, const char *s, int l) { /* * XXX This function is not currently being used. * The intention was that it be called instead of * Log_call() when 'target' is non zero. This would * be the case for 'stdout' and 'stderr'. Doing * this bypasses normally Apache logging mechanisms * though. May reawaken this code in mod_wsgi 4.0 * by way of a mechanism to divert logging from a * daemon process to specfic log file or pipe using * an option to WSGIDaemonProcess. */ char errstr[MAX_STRING_LEN]; int plen = 0; int slen = 0; #if AP_SERVER_MAJORVERSION_NUMBER < 2 FILE *logf; #else apr_file_t *logf = NULL; #endif if (self->r) logf = self->r->server->error_log; else logf = wsgi_server->error_log; #if AP_SERVER_MAJORVERSION_NUMBER < 2 plen = ap_snprintf(errstr, sizeof(errstr), "[%s] ", ap_get_time()); #else errstr[0] = '['; ap_recent_ctime(errstr + 1, apr_time_now()); errstr[1 + APR_CTIME_LEN - 1] = ']'; errstr[1 + APR_CTIME_LEN ] = ' '; plen = 1 + APR_CTIME_LEN + 1; #endif if (self->target) { int len; errstr[plen++] = '['; len = strlen(self->target); memcpy(errstr+plen, self->target, len); plen += len; errstr[plen++] = ']'; errstr[plen++] = ' '; } slen = MAX_STRING_LEN - plen - 1; Py_BEGIN_ALLOW_THREADS /* * We actually break long lines up into segments * of around 8192 characters, with the date/time * and target information prefixing each line. * This is just to avoid having to allocate more * memory just to format the line with prefix. * We want to avoid writing the prefix separately * so at least try and write line in one atomic * operation. */ while (1) { if (l > slen) { memcpy(errstr+plen, s, slen); errstr[plen+slen] = '\n'; #if AP_SERVER_MAJORVERSION_NUMBER < 2 fwrite(errstr, plen+slen+1, 1, logf); fflush(logf); #else apr_file_write_full(logf, errstr, plen+slen+1, NULL); apr_file_flush(logf); #endif s += slen; l -= slen; } else { memcpy(errstr+plen, s, l); errstr[plen+l] = '\n'; #if AP_SERVER_MAJORVERSION_NUMBER < 2 fwrite(errstr, plen+l+1, 1, logf); fflush(logf); #else apr_file_write_full(logf, errstr, plen+l+1, NULL); apr_file_flush(logf); #endif break; } } Py_END_ALLOW_THREADS } #endif static void Log_call(LogObject *self, const char *s, int l) { /* * The length of the string to be logged is ignored * for now. 
We just pass the whole string to the * Apache error log functions. It will actually * truncate it at some value less than 8192 * characters depending on the length of the prefix * to go at the front. If there are embedded NULLs * then truncation will occur at that point. That * truncation occurs like this is also what happens * if using FASTCGI solutions for Apache, so not * doing anything different here. */ if (self->r) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_LEVEL(self->level), self->r, "%s", s); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_LEVEL(self->level), wsgi_server, "%s", s); Py_END_ALLOW_THREADS } } static void Log_dealloc(LogObject *self) { if (self->s) { if (!self->expired) Log_call(self, self->s, self->l); free(self->s); } PyObject_Del(self); } static PyObject *Log_flush(LogObject *self, PyObject *args) { if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":flush")) return NULL; if (self->s) { Log_call(self, self->s, self->l); free(self->s); self->s = NULL; self->l = 0; } Py_INCREF(Py_None); return Py_None; } static PyObject *Log_close(LogObject *self, PyObject *args) { PyObject *result = NULL; if (!PyArg_ParseTuple(args, ":close")) return NULL; if (!self->expired) result = Log_flush(self, args); Py_XDECREF(result); self->r = NULL; self->expired = 1; Py_INCREF(Py_None); return Py_None; } static PyObject *Log_isatty(LogObject *self, PyObject *args) { PyObject *result = NULL; if (!PyArg_ParseTuple(args, ":isatty")) return NULL; Py_INCREF(Py_False); return Py_False; } static void Log_queue(LogObject *self, const char *msg, int len) { const char *p = NULL; const char *q = NULL; const char *e = NULL; p = msg; e = p + len; /* * Break string on newline. This is on assumption * that primarily textual information being logged. */ q = p; while (q != e) { if (*q == '\n') break; q++; } while (q != e) { /* Output each complete line. */ if (self->s) { /* Need to join with buffered value. */ int m = 0; int n = 0; char *s = NULL; m = self->l; n = m+q-p+1; s = (char *)malloc(n); memcpy(s, self->s, m); memcpy(s+m, p, q-p); s[n-1] = '\0'; free(self->s); self->s = NULL; self->l = 0; Log_call(self, s, n-1); free(s); } else { int n = 0; char *s = NULL; n = q-p+1; s = (char *)malloc(n); memcpy(s, p, q-p); s[n-1] = '\0'; Log_call(self, s, n-1); free(s); } p = q+1; /* Break string on newline. */ q = p; while (q != e) { if (*q == '\n') break; q++; } } if (p != e) { /* Save away incomplete line. */ if (self->s) { /* Need to join with buffered value. 
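 * The buffered data is a partial line left over from an earlier
 * write() call which did not end in a newline; it is prepended to
 * the current line before the combined result is passed to
 * Log_call().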
*/ int m = 0; int n = 0; m = self->l; n = m+e-p+1; self->s = (char *)realloc(self->s, n); memcpy(self->s+m, p, e-p); self->s[n-1] = '\0'; self->l = n-1; } else { int n = 0; n = e-p+1; self->s = (char *)malloc(n); memcpy(self->s, p, n-1); self->s[n-1] = '\0'; self->l = n-1; } } } static PyObject *Log_write(LogObject *self, PyObject *args) { const char *msg = NULL; int len = -1; if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "s#:write", &msg, &len)) return NULL; Log_queue(self, msg, len); Py_INCREF(Py_None); return Py_None; } static PyObject *Log_writelines(LogObject *self, PyObject *args) { PyObject *sequence = NULL; PyObject *iterator = NULL; PyObject *item = NULL; const char *msg = NULL; if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:writelines", &sequence)) return NULL; iterator = PyObject_GetIter(sequence); if (iterator == NULL) { PyErr_SetString(PyExc_TypeError, "argument must be sequence of strings"); return NULL; } while ((item = PyIter_Next(iterator))) { PyObject *result = NULL; PyObject *args = NULL; args = PyTuple_Pack(1, item); result = Log_write(self, args); Py_DECREF(args); Py_DECREF(item); if (!result) { Py_DECREF(iterator); PyErr_SetString(PyExc_TypeError, "argument must be sequence of strings"); return NULL; } } Py_DECREF(iterator); Py_INCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 static PyObject *Log_readable(LogObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, ":readable")) return NULL; Py_INCREF(Py_False); return Py_False; } static PyObject *Log_seekable(LogObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, ":seekable")) return NULL; Py_INCREF(Py_False); return Py_False; } static PyObject *Log_writable(LogObject *self, PyObject *args) { if (!PyArg_ParseTuple(args, ":writable")) return NULL; Py_INCREF(Py_True); return Py_True; } #endif static PyObject *Log_closed(LogObject *self, void *closure) { Py_INCREF(Py_False); return Py_False; } #if PY_MAJOR_VERSION < 3 static PyObject *Log_get_softspace(LogObject *self, void *closure) { return PyInt_FromLong(self->softspace); } static int Log_set_softspace(LogObject *self, PyObject *value) { int new; if (value == NULL) { PyErr_SetString(PyExc_TypeError, "can't delete softspace attribute"); return -1; } new = PyInt_AsLong(value); if (new == -1 && PyErr_Occurred()) return -1; self->softspace = new; return 0; } #else static PyObject *Log_get_encoding(LogObject *self, void *closure) { return PyUnicode_FromString("utf-8"); } static PyObject *Log_get_errors(LogObject *self, void *closure) { return PyUnicode_FromString("replace"); } #endif static PyMethodDef Log_methods[] = { { "flush", (PyCFunction)Log_flush, METH_VARARGS, 0 }, { "close", (PyCFunction)Log_close, METH_VARARGS, 0 }, { "isatty", (PyCFunction)Log_isatty, METH_VARARGS, 0 }, { "write", (PyCFunction)Log_write, METH_VARARGS, 0 }, { "writelines", (PyCFunction)Log_writelines, METH_VARARGS, 0 }, #if PY_MAJOR_VERSION >= 3 { "readable", (PyCFunction)Log_readable, METH_VARARGS, 0 }, { "seekable", (PyCFunction)Log_seekable, METH_VARARGS, 0 }, { "writable", (PyCFunction)Log_writable, METH_VARARGS, 0 }, #endif { NULL, NULL} }; static PyGetSetDef Log_getset[] = { { "closed", (getter)Log_closed, NULL, 0 }, #if PY_MAJOR_VERSION < 3 { "softspace", (getter)Log_get_softspace, (setter)Log_set_softspace, 0 }, #else { "encoding", (getter)Log_get_encoding, NULL, 0 }, { "errors", (getter)Log_get_errors, 
NULL, 0 }, #endif { NULL }, }; static PyTypeObject Log_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Log", /*tp_name*/ sizeof(LogObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Log_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Log_methods, /*tp_methods*/ 0, /*tp_members*/ Log_getset, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; static void wsgi_log_python_error(request_rec *r, PyObject *log, const char *filename) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; PyObject *xlog = NULL; if (!PyErr_Occurred()) return; if (!log) { PyErr_Fetch(&type, &value, &traceback); xlog = newLogObject(r, APLOG_ERR, NULL); log = xlog; PyErr_Restore(type, value, traceback); type = NULL; value = NULL; traceback = NULL; } if (PyErr_ExceptionMatches(PyExc_SystemExit)) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): SystemExit exception raised by " "WSGI script '%s' ignored.", getpid(), filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): SystemExit exception raised by " "WSGI script '%s' ignored.", getpid(), filename); } Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Exception occurred processing " "WSGI script '%s'.", getpid(), filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Exception occurred processing " "WSGI script '%s'.", getpid(), filename); } Py_END_ALLOW_THREADS } PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *args = NULL; Py_INCREF(o); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyEval_CallObject(o, args); Py_DECREF(args); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. 
*/ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); Py_XDECREF(xlog); } typedef struct { PyObject_HEAD request_rec *r; int init; int done; char *buffer; apr_size_t size; apr_size_t offset; apr_size_t length; } InputObject; static PyTypeObject Input_Type; static InputObject *newInputObject(request_rec *r) { InputObject *self; self = PyObject_New(InputObject, &Input_Type); if (self == NULL) return NULL; self->r = r; self->init = 0; self->done = 0; self->buffer = NULL; self->size = 0; self->offset = 0; self->length = 0; return self; } static void Input_dealloc(InputObject *self) { if (self->buffer) free(self->buffer); PyObject_Del(self); } static PyObject *Input_close(InputObject *self, PyObject *args) { if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":close")) return NULL; Py_INCREF(Py_None); return Py_None; } static PyObject *Input_read(InputObject *self, PyObject *args) { long size = -1; PyObject *result = NULL; char *buffer = NULL; apr_size_t length = 0; int init = 0; apr_size_t n; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "|l:read", &size)) return NULL; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_inactivity_timeout) { apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_inactivity_shutdown_time = apr_time_now(); wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout; apr_thread_mutex_unlock(wsgi_shutdown_lock); } #endif init = self->init; if (!self->init) { if (!ap_should_client_block(self->r)) self->done = 1; self->init = 1; } /* No point continuing if no more data to be consumed. */ if (self->done && self->length == 0) return PyString_FromString(""); /* * If requested size is zero bytes, then still need to pass * this through to Apache input filters so that any * 100-continue response is triggered. Only do this if very * first attempt to read data. Note that this will cause an * assertion failure in HTTP_IN input filter when Apache * maintainer mode is enabled. It is arguable that the * assertion check, which prohibits a zero length read, * shouldn't exist, as why should a zero length read be not * allowed if input filter processing still works when it * does occur. */ if (size == 0) { if (!init) { char dummy[1]; Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, dummy, 0); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); return NULL; } } return PyString_FromString(""); } /* * First deal with case where size has been specified. After * that deal with case where expected that all remaining * data is to be read in and returned as one string. */ if (size > 0) { /* Allocate string of the exact size required. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { if (size >= self->length) { length = self->length; memcpy(buffer, self->buffer + self->offset, length); self->offset = 0; self->length = 0; } else { length = size; memcpy(buffer, self->buffer + self->offset, length); self->offset += length; self->length -= length; } } /* If all data residual buffer consumed then free it. 
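 * The residual buffer holds data previously consumed by
 * readline() but not yet returned to the caller, so it is handed
 * back before any further data is read from Apache.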
*/ if (!self->length) { free(self->buffer); self->buffer = NULL; } /* Read in remaining data required to achieve size. */ if (length < size) { while (length != size) { Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, buffer + length, size - length); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; break; } length += n; } /* * Resize the final string. If the size reduction is * by more than 25% of the string size, then Python * will allocate a new block of memory and copy the * data into it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } } else { /* * Here we are going to try and read in all the * remaining data. First we have to allocate a suitably * large string, but we can't fully trust the amount * that the request structure says is remaining based on * the original content length though, as an input * filter can insert/remove data from the input stream * thereby invalidating the original content length. * What we do is allow for an extra 25% above what we * have already buffered and what the request structure * says is remaining. A value of 25% has been chosen so * as to match best how Python handles resizing of * strings. Note that even though we do this and allow * all available content, strictly speaking the WSGI * specification says we should only read up until content * length. This though is because the WSGI specification * is deficient in dealing with the concept of mutating * input filters. Since read() with no argument is also * not allowed by WSGI specification implement it in the * way which is most logical and ensure that input data * is not truncated. */ size = self->length; if (!self->r->read_chunked && self->r->remaining > 0) size += self->r->remaining; size = size + (size >> 2); if (size < 256) size = self->r->read_chunked ? 8192 : 256; /* Allocate string of the estimated size. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* * Copy any residual data from use of readline(). The * residual should always be less in size than the * string we have allocated to hold it, so can consume * all of it. */ if (self->buffer && self->length) { length = self->length; memcpy(buffer, self->buffer + self->offset, length); self->offset = 0; self->length = 0; free(self->buffer); self->buffer = NULL; } /* Now make first attempt at reading remaining data. */ Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, buffer + length, size - length); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } length += n; /* * Don't just assume that all data has been read if * amount read was less than that requested. Still must * perform a read which returns that no more data found. */ while (!self->done) { if (length == size) { /* Increase the size of the string by 25%. */ size = size + (size >> 2); if (_PyString_Resize(&result, size)) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); } /* Now make succesive attempt at reading data. 
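 * The result string is grown by a further 25% each time it fills
 * up, and reading stops only once ap_get_client_block() returns
 * zero to signal that the input has been exhausted.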
*/ Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, buffer + length, size - length); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } length += n; } /* * Resize the final string. If the size reduction is by * more than 25% of the string size, then Python will * allocate a new block of memory and copy the data into * it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } return result; } static PyObject *Input_readline(InputObject *self, PyObject *args) { long size = -1; PyObject *result = NULL; char *buffer = NULL; apr_size_t length = 0; apr_size_t n; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "|l:readline", &size)) return NULL; if (!self->init) { if (!ap_should_client_block(self->r)) self->done = 1; self->init = 1; } /* * No point continuing if requested size is zero or if no * more data to read and no buffered data. */ if ((self->done && self->length == 0) || size == 0) return PyString_FromString(""); /* * First deal with case where size has been specified. After * that deal with case where expected that a complete line * is returned regardless of the size. */ if (size > 0) { /* Allocate string of the exact size required. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { char *p = NULL; const char *q = NULL; p = buffer; q = self->buffer + self->offset; while (self->length && length < size) { self->offset++; self->length--; length++; if ((*p++ = *q++) == '\n') break; } /* If all data in residual buffer consumed then free it. */ if (!self->length) { free(self->buffer); self->buffer = NULL; } } /* * Read in remaining data required to achieve size. Note * that can't just return whatever the first read might * have returned if no EOL encountered as must return * exactly the required size if no EOL unless that would * have exhausted all input. */ while ((!length || buffer[length-1] != '\n') && !self->done && length < size) { char *p = NULL; char *q = NULL; Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, buffer + length, size - length); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } else { /* * Search for embedded EOL in what was read and if * found copy any residual into a buffer for use * next time the read functions are called. */ p = buffer + length; q = p + n; while (p != q) { length++; if (*p++ == '\n') break; } if (p != q) { self->size = q - p; self->buffer = (char *)malloc(self->size); self->offset = 0; self->length = self->size; memcpy(self->buffer, p, self->size); } } } /* * Resize the final string. If the size reduction is * by more than 25% of the string size, then Python * will allocate a new block of memory and copy the * data into it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } else { /* * Here we have to read in a line but where we have no * idea how long it may be. What we can do first is if * we have any residual data from a previous read * operation, see if it contains an EOL. 
This means we * have to do a search, but this is likely going to be * better than having to resize and copy memory later on. */ if (self->buffer && self->length) { const char *p = NULL; const char *q = NULL; p = self->buffer + self->offset; q = memchr(p, '\n', self->length); if (q) size = q - p; } /* * If residual data buffer didn't contain an EOL, all we * can do is allocate a reasonably sized string and if * that isn't big enough keep increasing it in size. For * this we will start out with a buffer 25% greater in * size than what is stored in the residual data buffer * or one the same size as Apache string size, whichever * is greater. */ if (self->buffer && size < 0) { size = self->length; size = size + (size >> 2); } if (size < HUGE_STRING_LEN) size = HUGE_STRING_LEN; /* Allocate string of the initial size. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { char *p = NULL; const char *q = NULL; p = buffer; q = self->buffer + self->offset; while (self->length && length < size) { self->offset++; self->length--; length++; if ((*p++ = *q++) == '\n') break; } /* If all data in residual buffer consumed then free it. */ if (!self->length) { free(self->buffer); self->buffer = NULL; } } /* * Read in remaining data until find an EOL, or until all * data has been consumed. */ while ((!length || buffer[length-1] != '\n') && !self->done) { char *p = NULL; char *q = NULL; Py_BEGIN_ALLOW_THREADS n = ap_get_client_block(self->r, buffer + length, size - length); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "request data read error"); Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } else { /* * Search for embedded EOL in what was read and if * found copy any residual into a buffer for use * next time the read functions are called. */ p = buffer + length; q = p + n; while (p != q) { length++; if (*p++ == '\n') break; } if (p != q) { self->size = q - p; self->buffer = (char *)malloc(self->size); self->offset = 0; self->length = self->size; memcpy(self->buffer, p, self->size); } if (buffer[length-1] != '\n' && length == size) { /* Increase size of string and keep going. */ size = size + (size >> 2); if (_PyString_Resize(&result, size)) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); } } } /* * Resize the final string. If the size reduction is by * more than 25% of the string size, then Python will * allocate a new block of memory and copy the data into * it. 
*/ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } return result; } static PyObject *Input_readlines(InputObject *self, PyObject *args) { long hint = 0; long length = 0; PyObject *result = NULL; PyObject *line = NULL; PyObject *rlargs = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "|l:readlines", &hint)) return NULL; result = PyList_New(0); if (!result) return NULL; rlargs = PyTuple_New(0); if (!rlargs) { Py_DECREF(result); return NULL; } while (1) { int n; if (!(line = Input_readline(self, rlargs))) { Py_DECREF(result); result = NULL; break; } if ((n = PyString_Size(line)) == 0) { Py_DECREF(line); break; } if (PyList_Append(result, line) == -1) { Py_DECREF(line); Py_DECREF(result); result = NULL; break; } Py_DECREF(line); length += n; if (hint > 0 && length >= hint) break; } Py_DECREF(rlargs); return result; } static PyMethodDef Input_methods[] = { { "close", (PyCFunction)Input_close, METH_VARARGS, 0 }, { "read", (PyCFunction)Input_read, METH_VARARGS, 0 }, { "readline", (PyCFunction)Input_readline, METH_VARARGS, 0 }, { "readlines", (PyCFunction)Input_readlines, METH_VARARGS, 0 }, { NULL, NULL} }; static PyObject *Input_iter(InputObject *self) { if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } Py_INCREF(self); return (PyObject *)self; } static PyObject *Input_iternext(InputObject *self) { PyObject *line = NULL; PyObject *rlargs = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } rlargs = PyTuple_New(0); if (!rlargs) return NULL; line = Input_readline(self, rlargs); Py_DECREF(rlargs); if (!line) return NULL; if (PyString_GET_SIZE(line) == 0) { PyErr_SetObject(PyExc_StopIteration, Py_None); Py_DECREF(line); return NULL; } return line; } static PyTypeObject Input_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Input", /*tp_name*/ sizeof(InputObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Input_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ #if defined(Py_TPFLAGS_HAVE_ITER) Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ #else Py_TPFLAGS_DEFAULT, /*tp_flags*/ #endif 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ (getiterfunc)Input_iter, /*tp_iter*/ (iternextfunc)Input_iternext, /*tp_iternext*/ Input_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; typedef struct { PyObject_HEAD int result; request_rec *r; #if defined(MOD_WSGI_WITH_BUCKETS) apr_bucket_brigade *bb; #endif WSGIRequestConfig *config; InputObject *input; PyObject *log; int status; const char *status_line; PyObject *headers; PyObject *sequence; int content_length_set; apr_off_t content_length; apr_off_t output_length; } AdapterObject; static PyTypeObject Adapter_Type; typedef struct { PyObject_HEAD AdapterObject *adapter; PyObject *filelike; apr_size_t blksize; } StreamObject; static PyTypeObject Stream_Type; static AdapterObject *newAdapterObject(request_rec *r) { AdapterObject *self; self = PyObject_New(AdapterObject, &Adapter_Type); 
if (self == NULL) return NULL; self->result = HTTP_INTERNAL_SERVER_ERROR; self->r = r; #if defined(MOD_WSGI_WITH_BUCKETS) self->bb = NULL; #endif self->config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); self->status = HTTP_INTERNAL_SERVER_ERROR; self->status_line = NULL; self->headers = NULL; self->sequence = NULL; self->content_length_set = 0; self->content_length = 0; self->output_length = 0; self->input = newInputObject(r); self->log = newLogObject(r, APLOG_ERR, NULL); return self; } static void Adapter_dealloc(AdapterObject *self) { Py_XDECREF(self->headers); Py_XDECREF(self->sequence); Py_DECREF(self->input); Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args) { const char *status = NULL; PyObject *headers = NULL; PyObject *exc_info = NULL; PyObject *item = NULL; PyObject *latin_item = NULL; char* value = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "OO|O:start_response", &item, &headers, &exc_info)) { return NULL; } #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "expected byte string object for " "status, value containing non 'latin-1' characters " "found"); return NULL; } item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "expected byte string object for " "status, value of type %.200s found", item->ob_type->tp_name); Py_XDECREF(latin_item); return NULL; } status = PyString_AsString(item); if (!PyList_Check(headers)) { PyErr_SetString(PyExc_TypeError, "response headers must be a list"); Py_XDECREF(latin_item); return NULL; } if (exc_info && exc_info != Py_None) { if (self->status_line && !self->headers) { PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; if (!PyArg_ParseTuple(exc_info, "OOO", &type, &value, &traceback)) { Py_XDECREF(latin_item); return NULL; } Py_INCREF(type); Py_INCREF(value); Py_INCREF(traceback); PyErr_Restore(type, value, traceback); Py_XDECREF(latin_item); return NULL; } } else if (self->status_line && !self->headers) { PyErr_SetString(PyExc_RuntimeError, "headers have already been sent"); Py_XDECREF(latin_item); return NULL; } self->status_line = apr_pstrdup(self->r->pool, status); value = ap_getword(self->r->pool, &status, ' '); errno = 0; self->status = strtol(value, &value, 10); if (*value || errno == ERANGE) { PyErr_SetString(PyExc_TypeError, "status value is not an integer"); Py_XDECREF(latin_item); return NULL; } if (!*status) { PyErr_SetString(PyExc_ValueError, "status message was not supplied"); Py_XDECREF(latin_item); return NULL; } Py_XDECREF(self->headers); self->headers = headers; Py_INCREF(self->headers); Py_XDECREF(latin_item); return PyObject_GetAttrString((PyObject *)self, "write"); } static int Adapter_output(AdapterObject *self, const char *data, int length, int exception_when_aborted) { int i = 0; int n = 0; apr_status_t rv; request_rec *r; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_inactivity_timeout) { apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_inactivity_shutdown_time = apr_time_now(); wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout; apr_thread_mutex_unlock(wsgi_shutdown_lock); } #endif if (!self->status_line) { PyErr_SetString(PyExc_RuntimeError, "response has not been started"); return 0; } r = self->r; /* Have response headers yet been sent. 
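 * A non NULL self->headers means start_response() has supplied
 * response headers which have not yet been written out; once they
 * are flushed below the reference is released and self->headers
 * is reset to NULL.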
*/ if (self->headers) { /* * Apache prior to Apache 2.2.8 has a bug in it * whereby it doesn't force '100 Continue' * response before responding with headers if no * read. So, force a zero length read before * sending the headers if haven't yet attempted * to read anything. This will ensure that if no * request content has been read that any '100 * Continue' response will be flushed and sent * back to the client if client was expecting * one. Only want to do this for 2xx and 3xx * status values. Note that even though Apple * supplied version of Apache on MacOS X Leopard * is newer than version 2.2.8, the header file * has never been patched when they make updates * and so anything compiled against it thinks it * is older. */ #if (AP_SERVER_MAJORVERSION_NUMBER == 1) || \ (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER < 2) || \ (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER == 2 && \ AP_SERVER_PATCHLEVEL_NUMBER < 8) if (!self->input->init) { if (self->status >= 200 && self->status < 400) { PyObject *args = NULL; PyObject *result = NULL; args = Py_BuildValue("(i)", 0); result = Input_read(self->input, args); if (PyErr_Occurred()) PyErr_Clear(); Py_DECREF(args); Py_XDECREF(result); } } #endif /* Now setup response headers in request object. */ r->status = self->status; r->status_line = self->status_line; for (i = 0; i < PyList_Size(self->headers); i++) { PyObject *tuple = NULL; PyObject *object1 = NULL; PyObject *object2 = NULL; char *name = NULL; char *value = NULL; tuple = PyList_GetItem(self->headers, i); if (!PyTuple_Check(tuple)) { PyErr_Format(PyExc_TypeError, "list of tuple values " "expected, value of type %.200s found", tuple->ob_type->tp_name); return 0; } if (PyTuple_Size(tuple) != 2) { PyErr_Format(PyExc_ValueError, "tuple of length 2 " "expected, length is %d", (int)PyTuple_Size(tuple)); return 0; } object1 = PyTuple_GetItem(tuple, 0); object2 = PyTuple_GetItem(tuple, 1); if (PyString_Check(object1)) { name = PyString_AsString(object1); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(object1)) { PyObject *latin_object; latin_object = PyUnicode_AsLatin1String(object1); if (!latin_object) { PyErr_Format(PyExc_TypeError, "header name " "contained non 'latin-1' characters "); return 0; } name = apr_pstrdup(r->pool, PyString_AsString(latin_object)); Py_DECREF(latin_object); } #endif else { PyErr_Format(PyExc_TypeError, "expected byte string object " "for header name, value of type %.200s " "found", object1->ob_type->tp_name); return 0; } if (PyString_Check(object2)) { value = PyString_AsString(object2); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(object2)) { PyObject *latin_object; latin_object = PyUnicode_AsLatin1String(object2); if (!latin_object) { PyErr_Format(PyExc_TypeError, "header value " "contained non 'latin-1' characters "); return 0; } value = apr_pstrdup(r->pool, PyString_AsString(latin_object)); Py_DECREF(latin_object); } #endif else { PyErr_Format(PyExc_TypeError, "expected byte string object " "for header value, value of type %.200s " "found", object2->ob_type->tp_name); return 0; } if (strchr(name, '\n') != 0 || strchr(value, '\n') != 0) { PyErr_Format(PyExc_ValueError, "embedded newline in " "response header with name '%s' and value '%s'", name, value); return 0; } if (!strcasecmp(name, "Content-Type")) { #if AP_SERVER_MAJORVERSION_NUMBER < 2 r->content_type = apr_pstrdup(r->pool, value); #else /* * In a daemon child process we cannot call the * function ap_set_content_type() as want to * avoid adding any 
output filters based on the * type of file being served as this will be * done in the main Apache child process which * proxied the request to the daemon process. */ if (*self->config->process_group) r->content_type = apr_pstrdup(r->pool, value); else ap_set_content_type(r, apr_pstrdup(r->pool, value)); #endif } else if (!strcasecmp(name, "Content-Length")) { char *v = value; long l = 0; errno = 0; l = strtol(v, &v, 10); if (*v || errno == ERANGE || l < 0) { PyErr_SetString(PyExc_ValueError, "invalid content length"); return 0; } ap_set_content_length(r, l); self->content_length_set = 1; self->content_length = l; } else if (!strcasecmp(name, "WWW-Authenticate")) { apr_table_add(r->err_headers_out, name, value); } else { apr_table_add(r->headers_out, name, value); } } /* Need to force output of headers when using Apache 1.3. */ Py_BEGIN_ALLOW_THREADS ap_send_http_header(r); Py_END_ALLOW_THREADS /* * Reset flag indicating whether '100 Continue' response * expected. If we don't do this then if an attempt to read * input for the first time is after headers have been * sent, then Apache is wrongly generate the '100 Continue' * response into the response content. Not sure if this is * a bug in Apache, or that it truly believes that input * will never be read after the response headers have been * sent. */ r->expecting_100 = 0; /* No longer need headers now that they have been sent. */ Py_DECREF(self->headers); self->headers = NULL; } /* * If content length was specified, ensure that we don't * actually output more data than was specified as being * sent as otherwise technically in violation of HTTP RFC. */ if (length) { int output_length = length; if (self->content_length_set) { if (self->output_length < self->content_length) { if (self->output_length + length > self->content_length) { length = self->content_length - self->output_length; } } else length = 0; } self->output_length += output_length; } /* Now output any data. */ if (length) { #if defined(MOD_WSGI_WITH_BUCKETS) apr_bucket *b; /* * When using Apache 2.X can use lower level * bucket brigade APIs. This is preferred as * ap_rwrite()/ap_rflush() will grow memory in * the request pool on each call, which will * result in an increase in memory use over time * when streaming of data is being performed. * The memory is still reclaimed, but only at * the end of the request. Using bucket brigade * API avoids this, and also avoids any copying * of response data due to buffering performed * by ap_rwrite(). */ if (r->connection->aborted) { if (!exception_when_aborted) { ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r, "mod_wsgi (pid=%d): Client closed connection.", getpid()); } else PyErr_SetString(PyExc_IOError, "client connection closed"); return 0; } if (!self->bb) { self->bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); } b = apr_bucket_transient_create(data, length, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(self->bb, b); b = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(self->bb, b); Py_BEGIN_ALLOW_THREADS rv = ap_pass_brigade(r->output_filters, self->bb); Py_END_ALLOW_THREADS if (rv != APR_SUCCESS) { PyErr_SetString(PyExc_IOError, "failed to write data"); return 0; } Py_BEGIN_ALLOW_THREADS apr_brigade_cleanup(self->bb); Py_END_ALLOW_THREADS #else /* * In Apache 1.3, the bucket brigade system doesn't exist, * so have no choice but to use ap_rwrite()/ap_rflush(). * It is not believed that Apache 1.3 suffers the memory * accumulation problem when streaming lots of data. 
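 * This fallback is only compiled in when MOD_WSGI_WITH_BUCKETS is
 * not defined, which per the above corresponds to Apache 1.3.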
*/ Py_BEGIN_ALLOW_THREADS n = ap_rwrite(data, length, r); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "failed to write data"); return 0; } Py_BEGIN_ALLOW_THREADS n = ap_rflush(r); Py_END_ALLOW_THREADS if (n == -1) { PyErr_SetString(PyExc_IOError, "failed to flush data"); return 0; } #endif } /* * Check whether aborted connection was found when data * being written, otherwise will not be flagged until next * time that data is being written. Early detection is * better as it may have been the last data block being * written and application may think that data has all * been written. In a streaming application, we also want * to avoid any additional data processing to generate any * successive data. */ if (r->connection->aborted) { if (!exception_when_aborted) { ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r, "mod_wsgi (pid=%d): Client closed connection.", getpid()); } else PyErr_SetString(PyExc_IOError, "client connection closed"); return 0; } return 1; } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 /* Split buckets at 1GB when sending large files. */ #define MAX_BUCKET_SIZE (0x40000000) static int Adapter_output_file(AdapterObject *self, apr_file_t* tmpfile, apr_off_t offset, apr_off_t len) { request_rec *r; apr_bucket *b; apr_status_t rv; apr_bucket_brigade *bb; r = self->r; if (r->connection->aborted) { PyErr_SetString(PyExc_IOError, "client connection closed"); return 0; } if (len == 0) return 1; bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); if (sizeof(apr_off_t) == sizeof(apr_size_t) || len < MAX_BUCKET_SIZE) { /* Can use a single bucket to send file. */ b = apr_bucket_file_create(tmpfile, offset, (apr_size_t)len, r->pool, r->connection->bucket_alloc); } else { /* Need to create multiple buckets to send file. */ b = apr_bucket_file_create(tmpfile, offset, MAX_BUCKET_SIZE, r->pool, r->connection->bucket_alloc); while (len > MAX_BUCKET_SIZE) { apr_bucket *cb; apr_bucket_copy(b, &cb); APR_BRIGADE_INSERT_TAIL(bb, cb); b->start += MAX_BUCKET_SIZE; len -= MAX_BUCKET_SIZE; } /* Resize just the last bucket */ b->length = (apr_size_t)len; } APR_BRIGADE_INSERT_TAIL(bb, b); b = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); b = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); Py_BEGIN_ALLOW_THREADS rv = ap_pass_brigade(r->output_filters, bb); Py_END_ALLOW_THREADS if (rv != APR_SUCCESS) { PyErr_SetString(PyExc_IOError, "failed to write data"); return 0; } Py_BEGIN_ALLOW_THREADS apr_brigade_destroy(bb); Py_END_ALLOW_THREADS if (r->connection->aborted) { PyErr_SetString(PyExc_IOError, "client connection closed"); return 0; } return 1; } #endif #if AP_SERVER_MAJORVERSION_NUMBER >= 2 static APR_OPTIONAL_FN_TYPE(ssl_is_https) *wsgi_is_https = NULL; #endif static PyObject *Adapter_environ(AdapterObject *self) { request_rec *r = NULL; PyObject *vars = NULL; PyObject *object = NULL; const apr_array_header_t *head = NULL; const apr_table_entry_t *elts = NULL; int i = 0; const char *scheme = NULL; /* Create the WSGI environment dictionary. */ vars = PyDict_New(); /* Merge the CGI environment into the WSGI environment. 
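 * Each entry of r->subprocess_env is copied into the Python
 * dictionary below; the PATH variable is then deleted again so it
 * is not exposed to the application.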
*/ r = self->r; head = apr_table_elts(r->subprocess_env); elts = (apr_table_entry_t *)head->elts; for (i = 0; i < head->nelts; ++i) { if (elts[i].key) { if (elts[i].val) { #if PY_MAJOR_VERSION >= 3 if (!strcmp(elts[i].val, "DOCUMENT_ROOT")) { object = PyUnicode_Decode(elts[i].val, strlen(elts[i].val), Py_FileSystemDefaultEncoding, "surrogateescape"); } else if (!strcmp(elts[i].val, "SCRIPT_FILENAME")) { object = PyUnicode_Decode(elts[i].val, strlen(elts[i].val), Py_FileSystemDefaultEncoding, "surrogateescape"); } else { object = PyUnicode_DecodeLatin1(elts[i].val, strlen(elts[i].val), NULL); } #else object = PyString_FromString(elts[i].val); #endif PyDict_SetItemString(vars, elts[i].key, object); Py_DECREF(object); } else PyDict_SetItemString(vars, elts[i].key, Py_None); } } PyDict_DelItemString(vars, "PATH"); /* Now setup all the WSGI specific environment values. */ object = Py_BuildValue("(ii)", 1, 0); PyDict_SetItemString(vars, "wsgi.version", object); Py_DECREF(object); object = PyBool_FromLong(wsgi_multithread); PyDict_SetItemString(vars, "wsgi.multithread", object); Py_DECREF(object); object = PyBool_FromLong(wsgi_multiprocess); PyDict_SetItemString(vars, "wsgi.multiprocess", object); Py_DECREF(object); #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { if (wsgi_daemon_process->group->threads == 1 && wsgi_daemon_process->group->maximum_requests == 1) { PyDict_SetItemString(vars, "wsgi.run_once", Py_True); } else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); } else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); #else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); #endif scheme = apr_table_get(r->subprocess_env, "HTTPS"); if (scheme && (!strcasecmp(scheme, "On") || !strcmp(scheme, "1"))) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString("https"); #else object = PyString_FromString("https"); #endif PyDict_SetItemString(vars, "wsgi.url_scheme", object); Py_DECREF(object); } else { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString("http"); #else object = PyString_FromString("http"); #endif PyDict_SetItemString(vars, "wsgi.url_scheme", object); Py_DECREF(object); } /* * We remove the HTTPS variable because WSGI compliant * applications shouldn't rely on it. Instead they should * use wsgi.url_scheme. We do this even if SetEnv was * used to set HTTPS from Apache configuration. That is * we convert it into the correct variable and remove the * original. */ if (scheme) PyDict_DelItemString(vars, "HTTPS"); /* * Setup log object for WSGI errors. Don't decrement * reference to log object as keep reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* Setup input object for request content. */ object = (PyObject *)self->input; PyDict_SetItemString(vars, "wsgi.input", object); /* Setup file wrapper object for efficient file responses. */ object = PyObject_GetAttrString((PyObject *)self, "file_wrapper"); PyDict_SetItemString(vars, "wsgi.file_wrapper", object); Py_DECREF(object); /* Add mod_wsgi version information. */ object = Py_BuildValue("(ii)", MOD_WSGI_MAJORVERSION_NUMBER, MOD_WSGI_MINORVERSION_NUMBER); PyDict_SetItemString(vars, "mod_wsgi.version", object); Py_DECREF(object); /* * If Apache extensions are enabled and running in embedded * mode add a CObject reference to the Apache request_rec * structure instance. 
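 * On Python 3.2 and later a PyCapsule is used for this rather
 * than the older PyCObject, as the code below shows.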
*/ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2 object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use. */ #if 0 #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (!wsgi_daemon_pool) { object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); } #endif #endif return vars; } static int Adapter_process_file_wrapper(AdapterObject *self) { int done = 0; #ifndef WIN32 #if AP_SERVER_MAJORVERSION_NUMBER >= 2 PyObject *filelike = NULL; PyObject *method = NULL; PyObject *object = NULL; apr_status_t rv = 0; apr_os_file_t fd = -1; apr_file_t *tmpfile = NULL; apr_finfo_t finfo; apr_off_t fd_offset = 0; apr_off_t fo_offset = 0; apr_off_t length = 0; /* Perform file wrapper optimisations where possible. */ if (self->sequence->ob_type != &Stream_Type) return 0; /* * Only attempt to perform optimisations if the * write() function returned by start_response() * function has not been called with non zero length * data. In other words if no prior response content * generated. Technically it could be done, but want * to have a consistent rule about how specifying a * content length affects how much of a file is * sent. Don't want to have to take into * consideration whether write() function has been * called or not as just complicates things. */ if (self->output_length != 0) return 0; /* * Work out if file wrapper is associated with a * file like object, where that file object is * associated with a regular file. If it does then * we can optimise how the contents of the file are * sent out. If no such associated file descriptor * then it needs to be processed like any other * iterable value. */ filelike = ((StreamObject *)self->sequence)->filelike; fd = PyObject_AsFileDescriptor(filelike); if (fd == -1) { PyErr_Clear(); return 0; } /* * On some platforms, such as Linux, sendfile() system call * will not work on UNIX sockets. Thus when using daemon mode * cannot enable that feature. */ if (self->config->enable_sendfile) apr_os_file_put(&tmpfile, &fd, APR_SENDFILE_ENABLED, self->r->pool); else apr_os_file_put(&tmpfile, &fd, 0, self->r->pool); rv = apr_file_info_get(&finfo, APR_FINFO_SIZE|APR_FINFO_TYPE, tmpfile); if (rv != APR_SUCCESS || finfo.filetype != APR_REG) return 0; /* * Because Python file like objects potentially have * their own buffering layering, or use an operating * system FILE object which also has a buffering * layer on top of a normal file descriptor, need to * determine from the file like object its position * within the file and use that as starting position. * Note that it is assumed that user had flushed any * modifications to the file as necessary. Also, we * need to make sure we remember the original file * descriptor position as will need to restore that * position so it matches the upper buffering layers * when done. This is done to avoid any potential * problems if file like object does anything strange * in its close() method which relies on file position * being what it thought it should be. 
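 * In short: the descriptor offset is remembered via
 * apr_file_seek(), the logical position is taken from the file
 * like object's tell() method, the file is sent from that
 * position, and the descriptor offset is restored once done.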
*/ rv = apr_file_seek(tmpfile, APR_CUR, &fd_offset); if (rv != APR_SUCCESS) return 0; method = PyObject_GetAttrString(filelike, "tell"); if (!method) return 0; object = PyEval_CallObject(method, NULL); Py_DECREF(method); if (!object) { PyErr_Clear(); return 0; } if (PyLong_Check(object)) { #if defined(HAVE_LONG_LONG) fo_offset = PyLong_AsLongLong(object); #else fo_offset = PyLong_AsLong(object); #endif } #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(object)) { fo_offset = PyInt_AsLong(object); } #endif else { Py_DECREF(object); return 0; } if (PyErr_Occurred()){ Py_DECREF(object); PyErr_Clear(); return 0; } Py_DECREF(object); /* * For a file wrapper object need to always ensure * that response headers are parsed. This is done so * that if the content length header has been * defined we can get its value and use it to limit * how much of a file is being sent. The WSGI 1.0 * specification says that we are meant to send all * available bytes from the file, however this is * questionable as sending more than content length * would violate HTTP RFC. Note that this doesn't * actually flush the headers out when using Apache * 2.X. This is good, as we want to still be able to * set the content length header if none set and file * is seekable. If processing response headers fails, * then need to return as if done, with error being * logged later. */ if (!Adapter_output(self, "", 0, 0)) return 1; /* * If content length wasn't defined then determine * the amount of data which is available to send and * set the content length response header. Either * way, if can work out length then send data * otherwise fall through and treat it as normal * iterable. */ if (!self->content_length_set) { length = finfo.size - fo_offset; self->output_length += length; ap_set_content_length(self->r, length); self->content_length_set = 1; self->content_length = length; if (Adapter_output_file(self, tmpfile, fo_offset, length)) self->result = OK; done = 1; } else { length = finfo.size - fo_offset; self->output_length += length; /* Use user specified content length instead. */ length = self->content_length; if (Adapter_output_file(self, tmpfile, fo_offset, length)) self->result = OK; done = 1; } /* * Restore position of underlying file descriptor. * If this fails, then not much we can do about it. 
*/ apr_file_seek(tmpfile, APR_SET, &fd_offset); #endif #endif return done; } static int Adapter_run(AdapterObject *self, PyObject *object) { PyObject *vars = NULL; PyObject *start = NULL; PyObject *args = NULL; PyObject *iterator = NULL; PyObject *close = NULL; const char *msg = NULL; int length = 0; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_inactivity_timeout) { apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_inactivity_shutdown_time = apr_time_now(); wsgi_inactivity_shutdown_time += wsgi_inactivity_timeout; apr_thread_mutex_unlock(wsgi_shutdown_lock); } #endif vars = Adapter_environ(self); start = PyObject_GetAttrString((PyObject *)self, "start_response"); args = Py_BuildValue("(OO)", vars, start); self->sequence = PyEval_CallObject(object, args); if (self->sequence != NULL) { if (!Adapter_process_file_wrapper(self)) { int aborted = 0; iterator = PyObject_GetIter(self->sequence); if (iterator != NULL) { PyObject *item = NULL; while ((item = PyIter_Next(iterator))) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "sequence of " "byte string values expected, value " "containing non 'latin-1' characters " "found"); Py_DECREF(item); break; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "sequence of byte " "string values expected, value of " "type %.200s found", item->ob_type->tp_name); Py_DECREF(item); break; } msg = PyString_AsString(item); length = PyString_Size(item); if (!msg) { Py_DECREF(item); break; } if (length && !Adapter_output(self, msg, length, 0)) { if (!PyErr_Occurred()) aborted = 1; Py_DECREF(item); break; } Py_DECREF(item); } } if (!PyErr_Occurred() && !aborted) { if (Adapter_output(self, "", 0, 0)) self->result = OK; } Py_XDECREF(iterator); } /* * Log warning if more response content generated than was * indicated, or less if there was no errors generated by * the application. */ if (self->content_length_set && ((!PyErr_Occurred() && self->output_length != self->content_length) || (self->output_length > self->content_length))) { ap_log_rerror(APLOG_MARK, WSGI_LOG_DEBUG(0), self->r, "mod_wsgi (pid=%d): Content length mismatch, " "expected %s, response generated %s: %s", getpid(), apr_off_t_toa(self->r->pool, self->content_length), apr_off_t_toa(self->r->pool, self->output_length), self->r->filename); } if (PyErr_Occurred()) { /* * Response content has already been sent, so cannot * return an internal server error as Apache will * append its own error page. Thus need to return OK * and just truncate the response. */ if (self->status_line && !self->headers) self->result = OK; wsgi_log_python_error(self->r, self->log, self->r->filename); } if (PyObject_HasAttrString(self->sequence, "close")) { PyObject *args = NULL; PyObject *data = NULL; close = PyObject_GetAttrString(self->sequence, "close"); args = Py_BuildValue("()"); data = PyEval_CallObject(close, args); Py_DECREF(args); Py_XDECREF(data); Py_DECREF(close); } if (PyErr_Occurred()) wsgi_log_python_error(self->r, self->log, self->r->filename); Py_DECREF(self->sequence); self->sequence = NULL; } Py_DECREF(args); Py_DECREF(start); Py_DECREF(vars); /* Log details of any final Python exceptions. 
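 * One source of such exceptions is the TypeError raised in the loop
 * above when the iterable returned by the application yields values
 * which are neither byte strings nor, under Python 3, strings
 * representable as 'latin-1'. An illustrative sketch of the portable
 * form, which always yields byte strings explicitly:
 *
 *     def application(environ, start_response):
 *         start_response('200 OK',
 *                        [('Content-Type', 'text/plain; charset=utf-8')])
 *         return [u'\u20ac 10\n'.encode('utf-8')]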
*/ if (PyErr_Occurred()) wsgi_log_python_error(self->r, self->log, self->r->filename); /* * If result indicates an internal server error, then * replace the status line in the request object else * that provided by the application will be what is used * in any error page automatically generated by Apache. */ if (self->result == HTTP_INTERNAL_SERVER_ERROR) self->r->status_line = "500 Internal Server Error"; return self->result; } static PyObject *Adapter_write(AdapterObject *self, PyObject *args) { PyObject *item = NULL; PyObject *latin_item = NULL; const char *data = NULL; int length = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:write", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); return NULL; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_XDECREF(latin_item); return NULL; } data = PyString_AsString(item); length = PyString_Size(item); if (!Adapter_output(self, data, length, 1)) { Py_XDECREF(latin_item); return NULL; } Py_XDECREF(latin_item); Py_INCREF(Py_None); return Py_None; } static PyObject *newStreamObject(AdapterObject *adapter, PyObject *filelike, apr_size_t blksize); static PyObject *Adapter_file_wrapper(AdapterObject *self, PyObject *args) { PyObject *filelike = NULL; apr_size_t blksize = HUGE_STRING_LEN; PyObject *result = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O|l:file_wrapper", &filelike, &blksize)) return NULL; return newStreamObject(self, filelike, blksize); } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 static PyObject *Adapter_ssl_is_https(AdapterObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Adapter_ssl_var_lookup(AdapterObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); Py_DECREF(item); return NULL; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_DECREF(item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_XINCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); if 
(!value) { Py_XINCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } #endif static PyMethodDef Adapter_methods[] = { { "start_response", (PyCFunction)Adapter_start_response, METH_VARARGS, 0 }, { "write", (PyCFunction)Adapter_write, METH_VARARGS, 0 }, { "file_wrapper", (PyCFunction)Adapter_file_wrapper, METH_VARARGS, 0 }, #if AP_SERVER_MAJORVERSION_NUMBER >= 2 { "ssl_is_https", (PyCFunction)Adapter_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Adapter_ssl_var_lookup, METH_VARARGS, 0 }, #endif { NULL, NULL} }; static PyTypeObject Adapter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Adapter", /*tp_name*/ sizeof(AdapterObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Adapter_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Adapter_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; static PyObject *newStreamObject(AdapterObject *adapter, PyObject *filelike, apr_size_t blksize) { StreamObject *self; self = PyObject_New(StreamObject, &Stream_Type); if (self == NULL) return NULL; self->adapter = adapter; self->filelike = filelike; self->blksize = blksize; Py_INCREF(self->adapter); Py_INCREF(self->filelike); return (PyObject *)self; } static void Stream_dealloc(StreamObject *self) { Py_DECREF(self->filelike); Py_DECREF(self->adapter); PyObject_Del(self); } static PyObject *Stream_iter(StreamObject *self) { if (!self->adapter->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } Py_INCREF(self); return (PyObject *)self; } static PyObject *Stream_iternext(StreamObject *self) { PyObject *method = NULL; PyObject *args = NULL; PyObject *result = NULL; if (!self->adapter->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } method = PyObject_GetAttrString(self->filelike, "read"); if (!method) { PyErr_SetString(PyExc_KeyError, "file like object has no read() method"); return 0; } args = Py_BuildValue("(l)", self->blksize); result = PyEval_CallObject(method, args); Py_DECREF(method); Py_DECREF(args); if (!result) return 0; if (PyString_Check(result)) { if (PyString_Size(result) == 0) { PyErr_SetObject(PyExc_StopIteration, Py_None); Py_DECREF(result); return 0; } return result; } #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(result)) { if (PyUnicode_GetSize(result) == 0) { PyErr_SetObject(PyExc_StopIteration, Py_None); Py_DECREF(result); return 0; } return result; } #endif Py_DECREF(result); PyErr_SetString(PyExc_TypeError, "file like object yielded non string type"); return 0; } static PyObject *Stream_close(StreamObject *self, PyObject *args) { PyObject *method = NULL; PyObject *result = NULL; method = PyObject_GetAttrString(self->filelike, "close"); if (method) { result = PyEval_CallObject(method, (PyObject *)NULL); if (!result) PyErr_Clear(); Py_DECREF(method); } Py_XDECREF(result); Py_INCREF(Py_None); return Py_None; } 
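/*
 * For reference, the stream object above only requires that the
 * wrapped object provide a read() method accepting a block size; a
 * close() method is called if present, and when the object also maps
 * onto a regular file descriptor the earlier sendfile style
 * optimisation can be used instead of iterating. An illustrative
 * sketch only (the class name is hypothetical) of a non file backed
 * object usable with 'wsgi.file_wrapper':
 *
 *     class ChunkReader(object):
 *
 *         def __init__(self, data):
 *             self.data = data
 *             self.offset = 0
 *
 *         def read(self, size=-1):
 *             if size < 0:
 *                 size = len(self.data) - self.offset
 *             chunk = self.data[self.offset:self.offset+size]
 *             self.offset += len(chunk)
 *             return chunk
 *
 *         def close(self):
 *             self.data = b''
 *
 *     def application(environ, start_response):
 *         start_response('200 OK', [('Content-Type', 'text/plain')])
 *         return environ['wsgi.file_wrapper'](ChunkReader(b'Hello World!\n'))
 *
 * Iteration stops when read() returns an empty string.
 */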
static PyMethodDef Stream_methods[] = { { "close", (PyCFunction)Stream_close, METH_VARARGS, 0 }, { NULL, NULL} }; static PyTypeObject Stream_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Stream", /*tp_name*/ sizeof(StreamObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Stream_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ #if defined(Py_TPFLAGS_HAVE_ITER) Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ #else Py_TPFLAGS_DEFAULT, /*tp_flags*/ #endif 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ (getiterfunc)Stream_iter, /*tp_iter*/ (iternextfunc)Stream_iternext, /*tp_iternext*/ Stream_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* Restricted object to stop access to STDIN/STDOUT. */ typedef struct { PyObject_HEAD const char *s; } RestrictedObject; static PyTypeObject Restricted_Type; static RestrictedObject *newRestrictedObject(const char *s) { RestrictedObject *self; self = PyObject_New(RestrictedObject, &Restricted_Type); if (self == NULL) return NULL; self->s = s; return self; } static void Restricted_dealloc(RestrictedObject *self) { PyObject_Del(self); } static PyObject *Restricted_getattr(RestrictedObject *self, char *name) { PyErr_Format(PyExc_IOError, "%s access restricted by mod_wsgi", self->s); return NULL; } static PyTypeObject Restricted_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Restricted", /*tp_name*/ sizeof(RestrictedObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Restricted_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ (getattrfunc)Restricted_getattr, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* Function to restrict access to use of signal(). 
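 * When this intercept is installed in place of signal.signal(), a
 * registration such as the following from application code is logged
 * with a warning and otherwise ignored, the supplied handler simply
 * being returned unchanged (illustrative only):
 *
 *     import signal
 *     signal.signal(signal.SIGTERM, lambda signum, frame: None)
 *
 * A stack trace identifying where the registration was attempted is
 * also written to the log to help track down the caller.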
*/ static PyObject *wsgi_signal_intercept(PyObject *self, PyObject *args) { PyObject *h = NULL; int n = 0; PyObject *m = NULL; if (!PyArg_ParseTuple(args, "iO:signal", &n, &h)) return NULL; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server, "mod_wsgi (pid=%d): Callback registration for " "signal %d ignored.", getpid(), n); Py_END_ALLOW_THREADS m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_stack"); if (o) { PyObject *log = NULL; PyObject *args = NULL; PyObject *result = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_WARNING, NULL); args = Py_BuildValue("(OOO)", Py_None, Py_None, log); result = PyEval_CallObject(o, args); Py_XDECREF(result); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } Py_XDECREF(m); Py_INCREF(h); return h; } static PyMethodDef wsgi_signal_method[] = { { "signal", (PyCFunction)wsgi_signal_intercept, METH_VARARGS, 0 }, { NULL, NULL } }; /* Wrapper around Python interpreter instances. */ static const char *wsgi_python_path = NULL; static const char *wsgi_python_eggs = NULL; #if APR_HAS_THREADS static int wsgi_thread_count = 0; static apr_threadkey_t *wsgi_thread_key; #endif typedef struct { PyObject_HEAD char *name; PyInterpreterState *interp; int owner; #if APR_HAS_THREADS apr_hash_t *tstate_table; #else PyThreadState *tstate; #endif } InterpreterObject; static PyTypeObject Interpreter_Type; static InterpreterObject *newInterpreterObject(const char *name) { PyInterpreterState *interp = NULL; InterpreterObject *self = NULL; PyThreadState *tstate = NULL; PyThreadState *save_tstate = NULL; PyObject *module = NULL; PyObject *object = NULL; PyObject *item = NULL; /* Create handle for interpreter and local data. */ self = PyObject_New(InterpreterObject, &Interpreter_Type); if (self == NULL) return NULL; /* * If interpreter not named, then we want to bind * to the first Python interpreter instance created. * Give this interpreter an empty string as name. */ if (!name) { interp = PyInterpreterState_Head(); while (interp->next) interp = interp->next; name = ""; } /* Save away the interpreter name. */ self->name = strdup(name); if (interp) { /* * Interpreter provided to us so will not be * responsible for deleting it later. This will * be the case for the main Python interpreter. */ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Attach interpreter '%s'.", getpid(), name); self->interp = interp; self->owner = 0; } else { /* * Remember active thread state so can restore * it. This is actually the thread state * associated with simplified GIL state API. */ save_tstate = PyThreadState_Swap(NULL); /* * Create the interpreter. If creation of the * interpreter fails it will restore the * existing active thread state for us so don't * need to worry about it in that case. */ tstate = Py_NewInterpreter(); if (!tstate) { PyErr_SetString(PyExc_RuntimeError, "Py_NewInterpreter() failed"); Py_DECREF(self); return NULL; } Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Create interpreter '%s'.", getpid(), name); Py_END_ALLOW_THREADS self->interp = tstate->interp; self->owner = 1; } /* * Install restricted objects for STDIN and STDOUT, * or log object for STDOUT as appropriate. Don't do * this if not running on Win32 and we believe we * are running in single process mode, otherwise * it prevents use of interactive debuggers such as * the 'pdb' module. 
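 * The practical effect for application code, when the restrictions
 * are active, is that any attempt to use sys.stdin or sys.stdout
 * raises an IOError, while sys.stderr is always replaced with a log
 * object that writes through to the Apache error log. Illustrative
 * only:
 *
 *     import sys
 *     sys.stderr.write('diagnostic message\n')  # appears in error log
 *     sys.stdout.write('oops\n')                # IOError: sys.stdout
 *                                               # access restricted by
 *                                               # mod_wsgi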
*/ object = newLogObject(NULL, APLOG_ERR, "stderr"); PySys_SetObject("stderr", object); Py_DECREF(object); #ifndef WIN32 if (wsgi_parent_pid != getpid()) { #endif if (wsgi_server_config->restrict_stdout == 1) { object = (PyObject *)newRestrictedObject("sys.stdout"); PySys_SetObject("stdout", object); Py_DECREF(object); } else { object = newLogObject(NULL, APLOG_ERR, "stdout"); PySys_SetObject("stdout", object); Py_DECREF(object); } if (wsgi_server_config->restrict_stdin == 1) { object = (PyObject *)newRestrictedObject("sys.stdin"); PySys_SetObject("stdin", object); Py_DECREF(object); } #ifndef WIN32 } #endif /* * Set sys.argv to one element list to fake out * modules that look there for Python command * line arguments as appropriate. */ object = PyList_New(0); #if PY_MAJOR_VERSION >= 3 item = PyUnicode_FromString("mod_wsgi"); #else item = PyString_FromString("mod_wsgi"); #endif PyList_Append(object, item); PySys_SetObject("argv", object); Py_DECREF(item); Py_DECREF(object); /* * Install intercept for signal handler registration * if appropriate. */ if (wsgi_server_config->restrict_signal != 0) { module = PyImport_ImportModule("signal"); PyModule_AddObject(module, "signal", PyCFunction_New( &wsgi_signal_method[0], NULL)); Py_DECREF(module); } /* * Force loading of codecs into interpreter. This has to be * done as not otherwise done in sub interpreters and if not * done, code running in sub interpreters can fail on some * platforms if a unicode string is added in sys.path and an * import then done. */ item = PyCodec_Encoder("ascii"); Py_XDECREF(item); /* * If running in daemon process, override as appropriate * the USER, USERNAME or LOGNAME environment variables * so that they match the user that the process is running * as. Need to do this else we inherit the value from the * Apache parent process which is likely wrong as will be * root or the user than ran sudo when Apache started. * Can't update these for normal Apache child processes * as that would change the expected environment of other * Apache modules. 
*/ #ifndef WIN32 if (wsgi_daemon_pool) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { struct passwd *pwent; pwent = getpwuid(geteuid()); if (getenv("USER")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("USER"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("USER"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } if (getenv("USERNAME")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("USERNAME"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("USERNAME"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } if (getenv("LOGNAME")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("LOGNAME"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("LOGNAME"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } } Py_DECREF(module); } } #endif /* * If running in daemon process, override HOME environment * variable so that is matches the home directory of the * user that the process is running as. Need to do this as * Apache will inherit HOME from root user or user that ran * sudo and started Apache and this would be wrong. Can't * update HOME for normal Apache child processes as that * would change the expected environment of other Apache * modules. */ #ifndef WIN32 if (wsgi_daemon_pool) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { struct passwd *pwent; pwent = getpwuid(geteuid()); #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("HOME"); value = PyUnicode_Decode(pwent->pw_dir, strlen(pwent->pw_dir), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("HOME"); value = PyString_FromString(pwent->pw_dir); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } Py_DECREF(module); } } #endif /* * Explicitly override the PYTHON_EGG_CACHE variable if it * was defined by Apache configuration. For embedded processes * this would have been done by using WSGIPythonEggs directive. * For daemon processes the 'python-eggs' option to the * WSGIDaemonProcess directive would have needed to be used. 
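 * The effect visible to application code is simply that the variable
 * appears in os.environ, so setuptools/pkg_resources will unpack any
 * eggs into the nominated directory rather than into the home
 * directory of the user the process runs as. Illustrative check only
 * (the directory is hypothetical):
 *
 *     import os
 *     assert os.environ['PYTHON_EGG_CACHE'] == '/var/cache/wsgi/eggs'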
*/ if (!wsgi_daemon_pool) wsgi_python_eggs = wsgi_server_config->python_eggs; if (wsgi_python_eggs) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("PYTHON_EGG_CACHE"); value = PyUnicode_Decode(wsgi_python_eggs, strlen(wsgi_python_eggs), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("PYTHON_EGG_CACHE"); value = PyString_FromString(wsgi_python_eggs); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } Py_DECREF(module); } } /* * Install user defined Python module search path. This is * added using site.addsitedir() so that any Python .pth * files are opened and additional directories so defined * are added to default Python search path as well. This * allows virtual Python environments to work. Note that * site.addsitedir() adds new directories at the end of * sys.path when they really need to be added in order at * the start. We therefore need to do a fiddle and shift * any newly added directories to the start of sys.path. */ if (!wsgi_daemon_pool) wsgi_python_path = wsgi_server_config->python_path; if (wsgi_python_path) { PyObject *path = NULL; module = PyImport_ImportModule("site"); path = PySys_GetObject("path"); if (module && path) { PyObject *dict = NULL; PyObject *old = NULL; PyObject *new = NULL; PyObject *tmp = NULL; PyObject *item = NULL; int i = 0; old = PyList_New(0); new = PyList_New(0); tmp = PyList_New(0); for (i=0; i= 3 item = PyUnicode_Decode(start, end-start, Py_FileSystemDefaultEncoding, "surrogateescape"); #else item = PyString_FromStringAndSize(start, end-start); #endif start = end+1; value = PyString_AsString(item); Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), value); Py_END_ALLOW_THREADS args = Py_BuildValue("(O)", item); result = PyEval_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Call to " "'site.addsitedir()' failed for '%s', " "stopping.", getpid(), value); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_DECREF(item); Py_DECREF(args); end = strchr(start, DELIM); while (result && end) { #if PY_MAJOR_VERSION >= 3 item = PyUnicode_Decode(start, end-start, Py_FileSystemDefaultEncoding, "surrogateescape"); #else item = PyString_FromStringAndSize(start, end-start); #endif start = end+1; value = PyString_AsString(item); Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), value); Py_END_ALLOW_THREADS args = Py_BuildValue("(O)", item); result = PyEval_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): " "Call to 'site.addsitedir()' failed " "for '%s', stopping.", getpid(), value); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_DECREF(item); Py_DECREF(args); end = strchr(start, DELIM); } } Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), start); Py_END_ALLOW_THREADS args = Py_BuildValue("(s)", start); result = PyEval_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Call to 
" "'site.addsitedir()' failed for '%s'.", getpid(), start); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_DECREF(args); Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Unable to locate " "'site.addsitedir()'.", getpid()); Py_END_ALLOW_THREADS } for (i=0; i= 3 PyModule_AddObject(module, "process_group", PyUnicode_DecodeLatin1(wsgi_daemon_group, strlen(wsgi_daemon_group), NULL)); PyModule_AddObject(module, "application_group", PyUnicode_DecodeLatin1(name, strlen(name), NULL)); #else PyModule_AddObject(module, "process_group", PyString_FromString(wsgi_daemon_group)); PyModule_AddObject(module, "application_group", PyString_FromString(name)); #endif Py_DECREF(module); /* * Create 'apache' Python module. If this is not a daemon * process and it is the first interpreter created by * Python, we first try and import an external Python module * of the same name. The intent is that this external module * would provide the SWIG bindings for the internal Apache * APIs. Only support use of such bindings in the first * interpreter created due to threading issues in SWIG * generated. */ module = NULL; if (!wsgi_daemon_pool) { module = PyImport_ImportModule("apache"); if (!module) { PyObject *modules = NULL; modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, "apache"); if (module) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Unable to import " "'apache' extension module.", getpid()); Py_END_ALLOW_THREADS PyErr_Print(); PyDict_DelItemString(modules, "apache"); module = NULL; } PyErr_Clear(); } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Imported 'apache'.", getpid()); Py_END_ALLOW_THREADS } } if (!module) { module = PyImport_AddModule("apache"); Py_INCREF(module); } /* * Add Apache version information to the Python 'apache' * module. */ PyModule_AddObject(module, "version", Py_BuildValue("(ii)", AP_SERVER_MAJORVERSION_NUMBER, AP_SERVER_MINORVERSION_NUMBER)); Py_DECREF(module); /* * Restore previous thread state. Only need to do * this where had to create a new interpreter. This * is basically anything except the first Python * interpreter instance. We need to restore it in * these cases as came into the function holding the * simplified GIL state for this thread but creating * the interpreter has resulted in a new thread * state object being created bound to the newly * created interpreter. In doing this though we want * to cache the thread state object which has been * created when interpreter is created. This is so * it can be reused later ensuring that thread local * data persists between requests. 
*/ if (self->owner) { #if APR_HAS_THREADS int thread_id = 0; int *thread_handle = NULL; self->tstate_table = apr_hash_make(wsgi_server->process->pool); apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle) { thread_id = wsgi_thread_count++; thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool, &thread_id, sizeof(thread_id)); apr_threadkey_private_set(thread_handle, wsgi_thread_key); } else { thread_id = *thread_handle; } if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Bind thread state for " "thread %d against interpreter '%s'.", getpid(), thread_id, self->name); } apr_hash_set(self->tstate_table, thread_handle, sizeof(*thread_handle), tstate); PyThreadState_Swap(save_tstate); #else self->tstate = tstate; PyThreadState_Swap(save_tstate); #endif } return self; } static void Interpreter_dealloc(InterpreterObject *self) { PyThreadState *tstate = NULL; PyObject *exitfunc = NULL; PyObject *module = NULL; PyThreadState *tstate_enter = NULL; /* * We should always enter here with the Python GIL * held and an active thread state. This should only * now occur when shutting down interpreter and not * when releasing interpreter as don't support * recyling of interpreters within the process. Thus * the thread state should be that for the main * Python interpreter. Where dealing with a named * sub interpreter, we need to change the thread * state to that which was originally used to create * that sub interpreter before doing anything. */ tstate_enter = PyThreadState_Get(); if (*self->name) { #if APR_HAS_THREADS int thread_id = 0; int *thread_handle = NULL; apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle) { thread_id = wsgi_thread_count++; thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool, &thread_id, sizeof(thread_id)); apr_threadkey_private_set(thread_handle, wsgi_thread_key); } else { thread_id = *thread_handle; } tstate = apr_hash_get(self->tstate_table, &thread_id, sizeof(thread_id)); if (!tstate) { tstate = PyThreadState_New(self->interp); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Create thread state for " "thread %d against interpreter '%s'.", getpid(), thread_id, self->name); } apr_hash_set(self->tstate_table, thread_handle, sizeof(*thread_handle), tstate); } #else tstate = self->tstate; #endif /* * Swap to interpreter thread state that was used when * the sub interpreter was created. */ PyThreadState_Swap(tstate); } if (self->owner) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Destroy interpreter '%s'.", getpid(), self->name); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Cleanup interpreter '%s'.", getpid(), self->name); Py_END_ALLOW_THREADS } /* * Because the thread state we are using was created outside * of any Python code and is not the same as the Python main * thread, there is no record of it within the 'threading' * module. We thus need to access current thread function of * the 'threading' module to force it to create a thread * handle for the thread. If we do not do this, then the * 'threading' modules exit function will always fail * because it will not be able to find a handle for this * thread. 
*/ module = PyImport_ImportModule("threading"); if (!module) PyErr_Clear(); if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); #if PY_MAJOR_VERSION >= 3 func = PyDict_GetItemString(dict, "current_thread"); #else func = PyDict_GetItemString(dict, "currentThread"); #endif if (func) { PyObject *res = NULL; Py_INCREF(func); res = PyEval_CallObject(func, (PyObject *)NULL); if (!res) { PyErr_Clear(); } Py_XDECREF(res); Py_DECREF(func); } } /* * In Python 2.5.1 an exit function is no longer used to * shutdown and wait on non daemon threads which were created * from Python code. Instead, in Py_Main() it explicitly * calls 'threading._shutdown()'. Thus need to emulate this * behaviour for those versions. */ if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); func = PyDict_GetItemString(dict, "_shutdown"); if (func) { PyObject *res = NULL; Py_INCREF(func); res = PyEval_CallObject(func, (PyObject *)NULL); if (res == NULL) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Exception occurred within " "threading._shutdown().", getpid()); Py_END_ALLOW_THREADS PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *log = NULL; PyObject *args = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_ERR, NULL); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyEval_CallObject(o, args); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. */ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); } Py_XDECREF(res); Py_DECREF(func); } } /* Finally done with 'threading' module. */ Py_XDECREF(module); /* * Invoke exit functions by calling sys.exitfunc() for * Python 2.X and atexit._run_exitfuncs() for Python 3.X. * Note that in Python 3.X we can't call this on main Python * interpreter as for Python 3.X it doesn't deregister * functions as called, so have no choice but to rely on * Py_Finalize() to do it for the main interpreter. Now * that simplified GIL state API usage sorted out, this * should be okay. 
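 * In practical terms this means cleanup handlers registered by the
 * application through the standard atexit module are given a chance
 * to run when the interpreter it is running in is being destroyed.
 * Illustrative sketch only (the handler is hypothetical):
 *
 *     import atexit
 *
 *     def cleanup():
 *         pass    # e.g. flush buffers or close connections cleanly
 *
 *     atexit.register(cleanup)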
*/ module = NULL; #if PY_MAJOR_VERSION >= 3 if (self->owner) { module = PyImport_ImportModule("atexit"); if (module) { PyObject *dict = NULL; dict = PyModule_GetDict(module); exitfunc = PyDict_GetItemString(dict, "_run_exitfuncs"); } else PyErr_Clear(); } #else exitfunc = PySys_GetObject("exitfunc"); #endif if (exitfunc) { PyObject *res = NULL; Py_INCREF(exitfunc); PySys_SetObject("exitfunc", (PyObject *)NULL); res = PyEval_CallObject(exitfunc, (PyObject *)NULL); if (res == NULL) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; if (PyErr_ExceptionMatches(PyExc_SystemExit)) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): SystemExit exception " "raised by exit functions ignored.", getpid()); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Exception occurred within " "exit functions.", getpid()); Py_END_ALLOW_THREADS } PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *log = NULL; PyObject *args = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_ERR, NULL); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyEval_CallObject(o, args); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. */ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); } Py_XDECREF(res); Py_DECREF(exitfunc); } Py_XDECREF(module); /* If we own it, we destroy it. */ if (self->owner) { /* * We need to destroy all the thread state objects * associated with the interpreter. If there are * background threads that were created then this * may well cause them to crash the next time they * try to run. Only saving grace is that we are * trying to shutdown the process. */ PyThreadState *tstate_save = tstate; PyThreadState *tstate_next = NULL; PyThreadState_Swap(NULL); tstate = tstate->interp->tstate_head; while (tstate) { tstate_next = tstate->next; if (tstate != tstate_save) { PyThreadState_Swap(tstate); PyThreadState_Clear(tstate); PyThreadState_Swap(NULL); PyThreadState_Delete(tstate); } tstate = tstate_next; } tstate = tstate_save; PyThreadState_Swap(tstate); /* Can now destroy the interpreter. 
*/ Py_EndInterpreter(tstate); PyThreadState_Swap(tstate_enter); } free(self->name); PyObject_Del(self); } static PyTypeObject Interpreter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Interpreter", /*tp_name*/ sizeof(InterpreterObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Interpreter_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* * Startup and shutdown of Python interpreter. In mod_wsgi if * the Python interpreter hasn't been initialised by another * Apache module such as mod_python, we will take control and * initialise it. Need to remember that we initialised Python * and whether done in parent or child process as when done in * the parent we also take responsibility for performing special * Python fixups after Apache is forked and child process has * run. * * Note that by default we now defer initialisation of Python * until after the fork of processes as Python 3.X by design * doesn't clean up properly when it is destroyed causing * significant memory leaks into Apache parent process on an * Apache restart. Some Python 2.X versions also have real * memory leaks but not near as much. The result of deferring * initialisation is that can't benefit from copy on write * semantics for loaded data across a fork. Each process will * therefore have higher memory requirement where Python needs * to be used. */ static int wsgi_python_initialized = 0; #if defined(MOD_WSGI_DISABLE_EMBEDDED) static int wsgi_python_required = 0; #else static int wsgi_python_required = -1; #endif static int wsgi_python_after_fork = 1; static void wsgi_python_version(void) { const char *compile = PY_VERSION; const char *dynamic = 0; dynamic = strtok((char *)Py_GetVersion(), " "); if (strcmp(compile, dynamic) != 0) { ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server, "mod_wsgi: Compiled for Python/%s.", compile); ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(0), wsgi_server, "mod_wsgi: Runtime using Python/%s.", dynamic); } } static apr_status_t wsgi_python_term() { PyInterpreterState *interp = NULL; PyThreadState *tstate = NULL; PyObject *module = NULL; ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Terminating Python.", getpid()); /* * We should be executing in the main thread again at this * point but without the GIL, so simply restore the original * thread state for that thread that we remembered when we * initialised the interpreter. */ PyEval_AcquireThread(wsgi_main_tstate); /* * Work around bug in Python 3.X whereby it will crash if * atexit imported into sub interpreter, but never imported * into main interpreter before calling Py_Finalize(). We * perform an import of atexit module and it as side effect * must be performing required initialisation. */ module = PyImport_ImportModule("atexit"); Py_XDECREF(module); /* * In Python 2.6.5 and Python 3.1.2 the shutdown of * threading was moved back into Py_Finalize() for the main * Python interpreter. 
Because we shutting down threading * ourselves, the second call results in errors being logged * when Py_Finalize() is called and the shutdown function * called a second time. The errors don't indicate any real * problem and the threading module ignores them anyway. * Whether we are using Python with this changed behaviour * can only be checked by looking at run time version. * Rather than try and add a dynamic check, create a fake * 'dummy_threading' module as the presence of that shuts up * the messages. It doesn't matter that the rest of the * shutdown function still runs as everything is already * stopped so doesn't do anything. */ if (!PyImport_AddModule("dummy_threading")) PyErr_Clear(); Py_Finalize(); wsgi_python_initialized = 0; ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Python has shutdown.", getpid()); return APR_SUCCESS; } #if AP_SERVER_MAJORVERSION_NUMBER < 2 static void wsgi_python_parent_cleanup(void *data) #else static apr_status_t wsgi_python_parent_cleanup(void *data) #endif { if (wsgi_parent_pid == getpid()) { /* * Destroy Python itself including the main * interpreter. If mod_python is being loaded it * is left to mod_python to destroy Python, * although it currently doesn't do so. */ if (wsgi_python_initialized) wsgi_python_term(); } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 return APR_SUCCESS; #endif } static void wsgi_python_init(apr_pool_t *p) { const char *python_home = 0; #if defined(DARWIN) && (AP_SERVER_MAJORVERSION_NUMBER < 2) static int initialized = 0; #else static int initialized = 1; #endif /* Perform initialisation if required. */ if (!Py_IsInitialized() || !initialized) { /* Enable Python 3.0 migration warnings. */ #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 if (wsgi_server_config->py3k_warning_flag == 1) Py_Py3kWarningFlag++; #endif /* Disable writing of byte code files. */ #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 if (wsgi_server_config->dont_write_bytecode == 1) Py_DontWriteBytecodeFlag++; #endif /* Check for Python paths and optimisation flag. */ if (wsgi_server_config->python_optimize > 0) Py_OptimizeFlag = wsgi_server_config->python_optimize; else Py_OptimizeFlag = 0; /* Check for control options for Python warnings. */ if (wsgi_server_config->python_warnings) { apr_array_header_t *options = NULL; char **entries; int i; options = wsgi_server_config->python_warnings; entries = (char **)options->elts; for (i = 0; i < options->nelts; ++i) { #if PY_MAJOR_VERSION >= 3 wchar_t *s = NULL; int len = strlen(entries[i])+1; s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, entries[i]); #else mbstowcs(s, entries[i], len); #endif PySys_AddWarnOption(s); #else PySys_AddWarnOption(entries[i]); #endif } } /* Check for Python HOME being overridden. 
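 * Overriding the Python home in this way is commonly used to point
 * at a baseline virtual environment; if so, the value supplied
 * should be reflected in sys.prefix within the interpreter.
 * Illustrative check only (the path is hypothetical):
 *
 *     import sys
 *     assert sys.prefix == '/usr/local/venvs/baseline'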
*/ python_home = wsgi_server_config->python_home; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process && wsgi_daemon_process->group->python_home) python_home = wsgi_daemon_process->group->python_home; #endif #if PY_MAJOR_VERSION >= 3 if (python_home) { wchar_t *s = NULL; int len = strlen(python_home)+1; ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Python home %s.", getpid(), python_home); s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, python_home); #else mbstowcs(s, python_home, len); #endif Py_SetPythonHome(s); } #else if (python_home) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Python home %s.", getpid(), python_home); Py_SetPythonHome((char *)python_home); } #endif /* * Work around bug in Python 3.1 where it will crash * when used in non console application on Windows if * stdin/stdout have been initialised and aren't null. */ #if defined(WIN32) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 3 _wputenv(L"PYTHONIOENCODING=cp1252:backslashreplace"); #endif /* Initialise Python. */ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Initializing Python.", getpid()); initialized = 1; Py_Initialize(); /* Initialise threading. */ PyEval_InitThreads(); /* * We now want to release the GIL. Before we do that * though we remember what the current thread state is. * We will use that later to restore the main thread * state when we want to cleanup interpreters on * shutdown. */ wsgi_main_tstate = PyThreadState_Get(); PyEval_ReleaseThread(wsgi_main_tstate); wsgi_python_initialized = 1; /* * Register cleanups to be performed on parent restart * or shutdown. This will destroy Python itself. */ #if AP_SERVER_MAJORVERSION_NUMBER < 2 ap_register_cleanup(p, NULL, wsgi_python_parent_cleanup, ap_null_cleanup); #else apr_pool_cleanup_register(p, NULL, wsgi_python_parent_cleanup, apr_pool_cleanup_null); #endif } } /* * Functions for acquiring and subsequently releasing desired * Python interpreter instance. When acquiring the interpreter * a new interpreter instance will be created on demand if it * is required. The Python GIL will be held on return when the * interpreter is acquired. */ #if APR_HAS_THREADS static apr_thread_mutex_t* wsgi_interp_lock = NULL; static apr_thread_mutex_t* wsgi_module_lock = NULL; #endif static PyObject *wsgi_interpreters = NULL; static InterpreterObject *wsgi_acquire_interpreter(const char *name) { PyThreadState *tstate = NULL; PyInterpreterState *interp = NULL; InterpreterObject *handle = NULL; PyGILState_STATE state; /* * In a multithreaded MPM must protect the * interpreters table. This lock is only needed to * avoid a secondary thread coming in and creating * the same interpreter if Python releases the GIL * when an interpreter is being created. */ #if APR_HAS_THREADS apr_thread_mutex_lock(wsgi_interp_lock); #endif /* * This function should never be called when the * Python GIL is held, so need to acquire it. Even * though we may need to work with a sub * interpreter, we need to acquire GIL against main * interpreter first to work with interpreter * dictionary. */ state = PyGILState_Ensure(); /* * Check if already have interpreter instance and * if not need to create one. 
*/ handle = (InterpreterObject *)PyDict_GetItemString(wsgi_interpreters, name); if (!handle) { handle = newInterpreterObject(name); if (!handle) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Cannot create interpreter '%s'.", getpid(), name); PyErr_Print(); PyErr_Clear(); PyGILState_Release(state); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif return NULL; } PyDict_SetItemString(wsgi_interpreters, name, (PyObject *)handle); } else Py_INCREF(handle); interp = handle->interp; /* * Create new thread state object. We should only be * getting called where no current active thread * state, so no need to remember the old one. When * working with the main Python interpreter always * use the simplified API for GIL locking so any * extension modules which use that will still work. */ PyGILState_Release(state); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif if (*name) { #if APR_HAS_THREADS int thread_id = 0; int *thread_handle = NULL; apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle) { thread_id = wsgi_thread_count++; thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool, &thread_id, sizeof(thread_id)); apr_threadkey_private_set(thread_handle, wsgi_thread_key); } else { thread_id = *thread_handle; } tstate = apr_hash_get(handle->tstate_table, &thread_id, sizeof(thread_id)); if (!tstate) { tstate = PyThreadState_New(interp); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Create thread state for " "thread %d against interpreter '%s'.", getpid(), thread_id, handle->name); } apr_hash_set(handle->tstate_table, thread_handle, sizeof(*thread_handle), tstate); } #else tstate = handle->tstate; #endif PyEval_AcquireThread(tstate); } else { PyGILState_Ensure(); /* * When simplified GIL state API is used, the thread * local data only persists for the extent of the top * level matching ensure/release calls. We want to * extend lifetime of the thread local data beyond * that, retaining it for all requests within the one * thread for the life of the process. To do that we * need to artificially increment the reference count * for the associated thread state object. */ tstate = PyThreadState_Get(); if (tstate && tstate->gilstate_counter == 1) tstate->gilstate_counter++; } return handle; } static void wsgi_release_interpreter(InterpreterObject *handle) { PyThreadState *tstate = NULL; PyGILState_STATE state; /* * Need to release and destroy the thread state that * was created against the interpreter. This will * release the GIL. Note that it should be safe to * always assume that the simplified GIL state API * lock was originally unlocked as always calling in * from an Apache thread when we acquire the * interpreter in the first place. */ if (*handle->name) { tstate = PyThreadState_Get(); PyEval_ReleaseThread(tstate); } else PyGILState_Release(PyGILState_UNLOCKED); /* * Need to reacquire the Python GIL just so we can * decrement our reference count to the interpreter * itself. If the interpreter has since been removed * from the table of interpreters this will result * in its destruction if its the last reference. */ state = PyGILState_Ensure(); Py_DECREF(handle); PyGILState_Release(state); } /* * Code for importing a module from source by absolute path. 
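 * The behaviour implemented below corresponds roughly to the
 * following pure Python sketch (simplified, with error handling and
 * locking omitted; the function name is illustrative):
 *
 *     import os, sys, types
 *
 *     def load_source(name, filename):
 *         code = compile(open(filename).read(), filename, 'exec')
 *         module = types.ModuleType(name)
 *         module.__file__ = filename
 *         sys.modules[name] = module
 *         exec(code, module.__dict__)
 *         module.__mtime__ = os.path.getmtime(filename)
 *         return module
 *
 * The recorded __mtime__ attribute is what the reload check later
 * compares against the current modification time of the script file.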
*/ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r, const char *name, int exists, const char* filename, const char *process_group, const char *application_group) { FILE *fp = NULL; PyObject *m = NULL; PyObject *co = NULL; struct _node *n = NULL; #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) apr_wchar_t wfilename[APR_PATH_MAX]; #endif if (exists) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Reloading WSGI script '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Reloading WSGI script '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Loading WSGI script '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Loading WSGI script '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS } #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) if (wsgi_utf8_to_unicode_path(wfilename, sizeof(wfilename) / sizeof(apr_wchar_t), filename)) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d, process='%s', " "application='%s'): Failed to convert '%s' " "to UCS2 filename.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d, process='%s', " "application='%s'): Failed to convert '%s' " "to UCS2 filename.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS return NULL; } fp = _wfopen(wfilename, "r"); #else fp = fopen(filename, "r"); #endif if (!fp) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Call to fopen() failed for '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Call to fopen() failed for '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS return NULL; } n = PyParser_SimpleParseFile(fp, filename, Py_file_input); fclose(fp); if (!n) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Failed to parse WSGI script file '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Failed to parse WSGI script file '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS return NULL; } co = (PyObject *)PyNode_Compile(n, filename); PyNode_Free(n); if (co) m = PyImport_ExecCodeModuleEx((char *)name, co, (char *)filename); Py_XDECREF(co); if (m) { PyObject *object = NULL; if (!r || strcmp(r->filename, filename)) { #if AP_SERVER_MAJORVERSION_NUMBER < 2 struct stat finfo; if (stat(filename, &finfo) == -1) { object = PyLong_FromLongLong(0); } else { object = PyLong_FromLongLong(finfo.st_mtime); } #else apr_finfo_t finfo; if (apr_stat(&finfo, filename, APR_FINFO_NORM, pool) != APR_SUCCESS) { object = 
PyLong_FromLongLong(0); } else { object = PyLong_FromLongLong(finfo.mtime); } #endif } else { #if AP_SERVER_MAJORVERSION_NUMBER < 2 object = PyLong_FromLongLong(r->finfo.st_mtime); #else object = PyLong_FromLongLong(r->finfo.mtime); #endif } PyModule_AddObject(m, "__mtime__", object); } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI script '%s' cannot " "be loaded as Python module.", getpid(), filename); } else { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Target WSGI script '%s' cannot " "be loaded as Python module.", getpid(), filename); } Py_END_ALLOW_THREADS wsgi_log_python_error(r, NULL, filename); } return m; } static int wsgi_reload_required(apr_pool_t *pool, request_rec *r, const char *filename, PyObject *module, const char *resource) { PyObject *dict = NULL; PyObject *object = NULL; apr_time_t mtime = 0; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "__mtime__"); if (object) { mtime = PyLong_AsLongLong(object); if (!r || strcmp(r->filename, filename)) { #if AP_SERVER_MAJORVERSION_NUMBER < 2 struct stat finfo; if (stat(filename, &finfo) == -1) { return 1; } else if (mtime != finfo.st_mtime) { return 1; } #else apr_finfo_t finfo; if (apr_stat(&finfo, filename, APR_FINFO_NORM, pool) != APR_SUCCESS) { return 1; } else if (mtime != finfo.mtime) { return 1; } #endif } else { #if AP_SERVER_MAJORVERSION_NUMBER < 2 if (mtime != r->finfo.st_mtime) return 1; #else if (mtime != r->finfo.mtime) return 1; #endif } } else return 1; if (resource) { PyObject *dict = NULL; PyObject *object = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "reload_required"); if (object) { PyObject *args = NULL; PyObject *result = NULL; Py_INCREF(object); args = Py_BuildValue("(s)", resource); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result && PyObject_IsTrue(result)) { Py_DECREF(result); return 1; } if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, filename); Py_XDECREF(result); } } return 0; } static char *wsgi_module_name(apr_pool_t *pool, const char *filename) { char *hash = NULL; char *file = NULL; /* * Calculate a name for the module using the MD5 of its full * pathname. This is so that different code files with the * same basename are still considered unique. Note that where * we believe a case insensitive file system is being used, * we always change the file name to lower case so that use * of different case in name doesn't result in duplicate * modules being loaded for the same file. */ file = (char *)filename; if (wsgi_server_config->case_sensitivity) { file = apr_pstrdup(pool, file); ap_str_tolower(file); } hash = ap_md5(pool, (const unsigned char *)file); return apr_pstrcat(pool, "_mod_wsgi_", hash, NULL); } static int wsgi_execute_script(request_rec *r) { WSGIRequestConfig *config = NULL; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; const char *script = NULL; const char *name = NULL; int exists = 0; int status; /* Grab request configuration. */ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
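 * For reference, the module name computed by wsgi_module_name()
 * above is an MD5 hash of the full script path, so scripts with the
 * same basename in different directories still load as distinct
 * modules. Roughly (the path is hypothetical):
 *
 *     import hashlib
 *     name = '_mod_wsgi_' + hashlib.md5(b'/var/www/site/app.wsgi').hexdigest()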
*/ interp = wsgi_acquire_interpreter(config->application_group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), config->application_group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ if (config->handler_script && *config->handler_script) script = config->handler_script; else script = r->filename; name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. For a handler script will * also see if it contains a custom function for determining * if a reload should be performed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, r->filename)) { /* * Script file has changed. Discard reference to * loaded module and work out what action we are * supposed to take. Choices are process reloading * and module reloading. Process reloading cannot be * performed unless a daemon process is being used. */ Py_DECREF(module); module = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) if (*config->process_group) { /* * Need to restart the daemon process. We bail * out on the request process here, sending back * a special response header indicating that * process is being restarted and that remote * end should abandon connection and attempt to * reconnect again. We also need to signal this * process so it will actually shutdown. The * process supervisor code will ensure that it * is restarted. */ Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r, "mod_wsgi (pid=%d): Force restart of " "process '%s'.", getpid(), config->process_group); Py_END_ALLOW_THREADS #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif wsgi_release_interpreter(interp); r->status = HTTP_INTERNAL_SERVER_ERROR; r->status_line = "200 Rejected"; wsgi_daemon_shutdown++; kill(getpid(), SIGINT); return OK; } else { /* * Need to reload just the script module. Remove * the module from the modules dictionary before * reloading it again. If code is executing * within the module at the time, the callers * reference count on the module should ensure * it isn't actually destroyed until it is * finished. */ PyDict_DelItemString(modules, name); } #else /* * Need to reload just the script module. Remove * the module from the modules dictionary before * reloading it again. If code is executing * within the module at the time, the callers * reference count on the module should ensure * it isn't actually destroyed until it is * finished. */ PyDict_DelItemString(modules, name); #endif } } /* * When process reloading is in use need to indicate * that request content should now be sent through. * This is done by writing a special response header * directly out onto the appropriate network output * filter. The special response is picked up by * remote end and data will then be sent. 
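 *
 * Concretely, the block below writes the literal bytes
 *
 *   Status: 200 Continue\r\n\r\n
 *
 * plus a flush bucket straight to the first network level output
 * filter, skipping any intermediate output filters.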
*/ #if defined(MOD_WSGI_WITH_DAEMONS) if (*config->process_group) { ap_filter_t *filters; apr_bucket_brigade *bb; apr_bucket *b; const char *data = "Status: 200 Continue\r\n\r\n"; int length = strlen(data); filters = r->output_filters; while (filters && filters->frec->ftype != AP_FTYPE_NETWORK) { filters = filters->next; } bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_transient_create(data, length, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); b = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); /* * This should always work, so ignore any errors * from passing the brigade to the network * output filter. If there are problems they * will be picked up further down in processing * anyway. */ ap_pass_brigade(filters, bb); } #endif /* Load module if not already loaded. */ if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, config->process_group, config->application_group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume an internal server error unless everything is okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. */ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, config->callable_object); if (object) { AdapterObject *adapter = NULL; adapter = newAdapterObject(r); if (adapter) { PyObject *method = NULL; PyObject *args = NULL; Py_INCREF(object); status = Adapter_run(adapter, object); Py_DECREF(object); /* * Wipe out references to the Apache request object * held by Python objects, so we can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; adapter->input->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); #if defined(MOD_WSGI_WITH_BUCKETS) adapter->bb = NULL; #endif } Py_XDECREF((PyObject *)adapter); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI script '%s' does " "not contain WSGI application '%s'.", getpid(), script, config->callable_object); Py_END_ALLOW_THREADS status = HTTP_NOT_FOUND; } } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, r->filename); /* Clean up and release the interpreter. */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } /* * Apache child process initialisation and cleanup. Initialise * global table containing Python interpreter instances and * cache reference to main interpreter. Also register cleanup * function to delete interpreter on process shutdown. */ #if AP_SERVER_MAJORVERSION_NUMBER < 2 static void wsgi_python_child_cleanup(void *data) #else static apr_status_t wsgi_python_child_cleanup(void *data) #endif { PyObject *interp = NULL; /* In a multithreaded MPM we must protect the table.
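 * The table in question is the wsgi_interpreters dictionary
 * created in wsgi_python_child_init() below; wsgi_interp_lock
 * serialises access to it while the child is being torn down.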
*/ #if APR_HAS_THREADS apr_thread_mutex_lock(wsgi_interp_lock); #endif /* * We should be executing in the main thread again at this * point but without the GIL, so simply restore the original * thread state for that thread that we remembered when we * initialised the interpreter. */ PyEval_AcquireThread(wsgi_main_tstate); /* * Extract a handle to the main Python interpreter from * interpreters dictionary as we want to process that one last. */ interp = PyDict_GetItemString(wsgi_interpreters, ""); Py_INCREF(interp); /* * Remove all items from interpreters dictionary. This will * have the side effect of calling any exit functions and * destroying interpreters we own. */ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Destroying interpreters.", getpid()); PyDict_Clear(wsgi_interpreters); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif /* * Now we decrement reference on handle for main Python * interpreter. This only causes exit functions to be called * and doesn't result in the interpreter being destroyed as * we didn't previously mark ourselves as the owner of the * interpreter. Note that when Python as a whole is later * being destroyed it will also call exit functions, but by * then the exit function registrations have been removed * and so they will not actually be run a second time. */ Py_DECREF(interp); /* * The code which performs actual shutdown of the main * interpreter expects to be called without the GIL, so * we release it here again. */ PyEval_ReleaseThread(wsgi_main_tstate); /* * Destroy Python itself including the main interpreter. * If mod_python is being loaded it is left to mod_python to * destroy Python, although it currently doesn't do so. */ if (wsgi_python_initialized) wsgi_python_term(); #if AP_SERVER_MAJORVERSION_NUMBER >= 2 return APR_SUCCESS; #endif } static void wsgi_python_child_init(apr_pool_t *p) { PyGILState_STATE state; PyInterpreterState *interp = NULL; PyObject *object = NULL; int thread_id = 0; int *thread_handle = NULL; /* Working with Python, so must acquire GIL. */ state = PyGILState_Ensure(); /* * Trigger any special Python stuff required after a fork. * Only do this though if we were responsible for the * initialisation of the Python interpreter in the first * place to avoid it being done multiple times. Also only * do it if Python was initialised in the parent process. */ if (wsgi_python_initialized && !wsgi_python_after_fork) PyOS_AfterFork(); /* Finalise any Python objects required by child process. */ PyType_Ready(&Log_Type); PyType_Ready(&Stream_Type); PyType_Ready(&Input_Type); PyType_Ready(&Adapter_Type); PyType_Ready(&Restricted_Type); PyType_Ready(&Interpreter_Type); PyType_Ready(&Dispatch_Type); #if defined(MOD_WSGI_WITH_AAA_HANDLERS) PyType_Ready(&Auth_Type); #endif /* Initialise Python interpreter instance table and lock. */ wsgi_interpreters = PyDict_New(); #if APR_HAS_THREADS apr_thread_mutex_create(&wsgi_interp_lock, APR_THREAD_MUTEX_UNNESTED, p); apr_thread_mutex_create(&wsgi_module_lock, APR_THREAD_MUTEX_UNNESTED, p); #endif /* * Initialise the key for data related to a thread. At * the moment we only record an integer thread ID to be * used in a lookup table of thread states associated with * an interpreter.
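 * The identifier is duplicated into the per process pool so the
 * pointer handed to apr_threadkey_private_set() below remains
 * valid for the life of the process.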
*/ #if APR_HAS_THREADS apr_threadkey_private_create(&wsgi_thread_key, NULL, p); thread_id = wsgi_thread_count++; thread_handle = (int*)apr_pmemdup(wsgi_server->process->pool, &thread_id, sizeof(thread_id)); apr_threadkey_private_set(thread_handle, wsgi_thread_key); #endif /* * Cache a reference to the first Python interpreter * instance. This interpreter is special as some third party * Python modules will only work when used from within this * interpreter. This is generally when they use the Python * simplified GIL API or otherwise don't use threading API * properly. An empty string for name is used to identify * the first Python interpreter instance. */ object = (PyObject *)newInterpreterObject(NULL); PyDict_SetItemString(wsgi_interpreters, "", object); Py_DECREF(object); /* Restore the prior thread state and release the GIL. */ PyGILState_Release(state); /* Register cleanups to performed on process shutdown. */ #if AP_SERVER_MAJORVERSION_NUMBER < 2 ap_register_cleanup(p, NULL, wsgi_python_child_cleanup, ap_null_cleanup); #else apr_pool_cleanup_register(p, NULL, wsgi_python_child_cleanup, apr_pool_cleanup_null); #endif /* Loop through import scripts for this process and load them. */ if (wsgi_import_list) { apr_array_header_t *scripts = NULL; WSGIScriptFile *entries; WSGIScriptFile *entry; int i; scripts = wsgi_import_list; entries = (WSGIScriptFile *)scripts->elts; for (i = 0; i < scripts->nelts; ++i) { int l = 0; entry = &entries[i]; if (!strcmp(wsgi_daemon_group, entry->process_group)) { InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; interp = wsgi_acquire_interpreter(entry->application_group); if (!interp) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Cannot acquire " "interpreter '%s'.", getpid(), entry->application_group); } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(p, entry->handler_script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module. Strictly * speaking shouldn't be required at this point. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && wsgi_server_config->script_reloading) { if (wsgi_reload_required(p, NULL, entry->handler_script, module, NULL)) { /* * Script file has changed. Only support module * reloading for dispatch scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(p, NULL, name, exists, entry->handler_script, entry->process_group, entry->application_group); if (PyErr_Occurred()) PyErr_Clear(); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); } } } } /* The processors for directives. 
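 *
 * Many of the directives handled below accept trailing options of
 * the form name=value, split out by wsgi_parse_option(). As a
 * hedged illustration only (the path is a placeholder, not taken
 * from this source), a WSGIScriptAlias line using those options
 * might look like:
 *
 *   WSGIScriptAlias /app /srv/app/app.wsgi \
 *       process-group=%{GLOBAL} application-group=%{GLOBAL} \
 *       callable-object=application pass-authorization=On
 *
 * Option values may be quoted, since each value is read back with
 * ap_getword_conf(), and %{GLOBAL} is mapped to the empty string.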
*/ static int wsgi_parse_option(apr_pool_t *p, const char **line, const char **name, const char **value) { const char *str = *line, *strend; while (*str && apr_isspace(*str)) ++str; if (!*str || *str == '=') { *line = str; return !APR_SUCCESS; } /* Option must be of form name=value. Extract the name. */ strend = str; while (*strend && *strend != '=' && !apr_isspace(*strend)) ++strend; if (*strend != '=') { *line = str; return !APR_SUCCESS; } *name = apr_pstrndup(p, str, strend-str); *line = strend+1; /* Now extract the value. Note that value can be quoted. */ *value = ap_getword_conf(p, line); return APR_SUCCESS; } static const char *wsgi_add_script_alias(cmd_parms *cmd, void *mconfig, const char *args) { const char *l = NULL; const char *a = NULL; WSGIServerConfig *sconfig = NULL; WSGIAliasEntry *entry = NULL; const char *option = NULL; const char *value = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) const char *process_group = NULL; #else const char *process_group = ""; #endif const char *application_group = NULL; const char *callable_object = NULL; int pass_authorization = -1; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->alias_list) { sconfig->alias_list = apr_array_make(sconfig->pool, 20, sizeof(WSGIAliasEntry)); } l = ap_getword_conf(cmd->pool, &args); if (*l == '\0' || *args == 0) { return apr_pstrcat(cmd->pool, cmd->cmd->name, " requires at least two arguments", cmd->cmd->errmsg ? ", " : NULL, cmd->cmd->errmsg, NULL); } a = ap_getword_conf(cmd->pool, &args); if (*a == '\0') { return apr_pstrcat(cmd->pool, cmd->cmd->name, " requires at least two arguments", cmd->cmd->errmsg ? ", " : NULL, cmd->cmd->errmsg, NULL); } while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI script alias definition."; } if (!cmd->info && !strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; if (!strcmp(value, "%{GLOBAL}")) value = ""; application_group = value; } #if defined(MOD_WSGI_WITH_DAEMONS) else if (!cmd->info && !strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; if (!strcmp(value, "%{GLOBAL}")) value = ""; process_group = value; } #endif else if (!strcmp(option, "callable-object")) { if (!*value) return "Invalid name for WSGI callable object."; callable_object = value; } else if (!strcmp(option, "pass-authorization")) { if (!*value) return "Invalid value for authorization flag."; if (strcasecmp(value, "Off") == 0) pass_authorization = 0; else if (strcasecmp(value, "On") == 0) pass_authorization = 1; else return "Invalid value for authorization flag."; } else return "Invalid option to WSGI script alias definition."; } entry = (WSGIAliasEntry *)apr_array_push(sconfig->alias_list); if (cmd->info) { entry->regexp = ap_pregcomp(cmd->pool, l, AP_REG_EXTENDED); if (!entry->regexp) return "Regular expression could not be compiled."; } entry->location = l; entry->application = a; entry->process_group = process_group; entry->application_group = application_group; entry->callable_object = callable_object; entry->pass_authorization = pass_authorization; if (process_group && application_group && !strstr(process_group, "%{") && !strstr(application_group, "%{")) { WSGIScriptFile *object = NULL; if (!wsgi_import_list) { wsgi_import_list = apr_array_make(sconfig->pool, 20, sizeof(WSGIScriptFile)); } object = (WSGIScriptFile *)apr_array_push(wsgi_import_list); object->handler_script = a; object->process_group = 
process_group; object->application_group = application_group; #if defined(MOD_WSGI_WITH_DAEMONS) if (*object->process_group) { WSGIProcessGroup *group = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; if (!wsgi_daemon_list) return "WSGI process group not yet configured."; entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (!strcmp(entry->name, object->process_group)) { group = entry; break; } } if (!group) return "WSGI process group not yet configured."; if (group->server != cmd->server && group->server->is_virtual) return "WSGI process group not accessible."; } #endif } return NULL; } static const char *wsgi_set_verbose_debugging(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->verbose_debugging = 0; else if (strcasecmp(f, "On") == 0) sconfig->verbose_debugging = 1; else return "WSGIVerboseDebugging must be one of: Off | On"; return NULL; } static const char *wsgi_set_lazy_initialization(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; if (strcasecmp(f, "Off") == 0) wsgi_python_after_fork = 0; else if (strcasecmp(f, "On") == 0) wsgi_python_after_fork = 1; else return "WSGILazyInitialization must be one of: Off | On"; return NULL; } static const char *wsgi_add_python_warnings(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; char **entry = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->python_warnings) { sconfig->python_warnings = apr_array_make(sconfig->pool, 5, sizeof(char*)); } entry = (char **)apr_array_push(sconfig->python_warnings); *entry = apr_pstrdup(sconfig->pool, f); return NULL; } static const char *wsgi_set_py3k_warning_flag(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->py3k_warning_flag = 0; else if (strcasecmp(f, "On") == 0) sconfig->py3k_warning_flag = 1; else return "WSGIPy3kWarningFlag must be one of: Off | On"; return NULL; } static const char *wsgi_set_python_optimize(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_optimize = atoi(f); return NULL; } static const char *wsgi_set_python_home(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_home = f; return NULL; } static const char *wsgi_set_python_path(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = 
ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_path = f; return NULL; } static const char *wsgi_set_python_eggs(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_eggs = f; return NULL; } static const char *wsgi_set_restrict_embedded(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_embedded = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_embedded = 1; else return "WSGIRestrictEmbedded must be one of: Off | On"; if (sconfig->restrict_embedded) { if (wsgi_python_required == -1) wsgi_python_required = 0; } return NULL; } static const char *wsgi_set_restrict_stdin(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_stdin = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_stdin = 1; else return "WSGIRestrictStdin must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_stdout(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_stdout = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_stdout = 1; else return "WSGIRestrictStdout must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_signal(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_signal = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_signal = 1; else return "WSGIRestrictSignal must be one of: Off | On"; return NULL; } static const char *wsgi_set_case_sensitivity(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->case_sensitivity = 0; else if (strcasecmp(f, "On") == 0) sconfig->case_sensitivity = 1; else return "WSGICaseSensitivity must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_process(cmd_parms *cmd, void *mconfig, const char *args) { apr_table_t *index = apr_table_make(cmd->pool, 5); if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->restrict_process = index; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, 
&wsgi_module); sconfig->restrict_process = index; } while (*args) { const char *option; option = ap_getword_conf(cmd->pool, &args); if (!strcmp(option, "%{GLOBAL}")) option = ""; apr_table_setn(index, option, option); } return NULL; } static const char *wsgi_set_process_group(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->process_group = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->process_group = n; } return NULL; } static const char *wsgi_set_application_group(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->application_group = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->application_group = n; } return NULL; } static const char *wsgi_set_callable_object(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->callable_object = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->callable_object = n; } return NULL; } static const char *wsgi_add_import_script(cmd_parms *cmd, void *mconfig, const char *args) { const char *error = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; if (!wsgi_import_list) { wsgi_import_list = apr_array_make(cmd->pool, 20, sizeof(WSGIScriptFile)); } object = (WSGIScriptFile *)apr_array_push(wsgi_import_list); object->handler_script = ap_getword_conf(cmd->pool, &args); object->process_group = NULL; object->application_group = NULL; if (!object->handler_script || !*object->handler_script) return "Location of import script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI import script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } #if defined(MOD_WSGI_WITH_DAEMONS) else if (!strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; object->process_group = value; } #endif else return "Invalid option to WSGI import script definition."; } if (!object->application_group) return "Name of WSGI application group required."; if (!strcmp(object->application_group, "%{GLOBAL}")) object->application_group = ""; #if defined(MOD_WSGI_WITH_DAEMONS) if (!object->process_group) return "Name of WSGI process group required."; if (!strcmp(object->process_group, "%{GLOBAL}")) object->process_group = ""; if (*object->process_group) { WSGIProcessGroup *group = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; if (!wsgi_daemon_list) return "WSGI process group not yet configured."; entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (!strcmp(entry->name, object->process_group)) { group = entry; break; } } if (!group) return "WSGI process group not yet configured."; if (group->server != cmd->server && group->server->is_virtual) return "WSGI process group not accessible."; } #else object->process_group = ""; #endif if (!*object->process_group) wsgi_python_required = 1; 
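    /*
     * Illustrative only (the path is a placeholder): a directive
     * handled by this function would typically be written as
     *
     *   WSGIImportScript /srv/app/prewarm.wsgi \
     *       process-group=%{GLOBAL} application-group=%{GLOBAL}
     *
     * The application group is always required, and the process
     * group as well when daemon mode support is compiled in, with
     * %{GLOBAL} mapping to the empty string. An empty process group
     * means the script is imported into the embedded interpreter,
     * which is why Python support in this process was flagged as
     * required above.
     */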
return NULL; } static const char *wsgi_set_dispatch_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of dispatch script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI dispatch script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI dispatch script definition."; } if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->dispatch_script = object; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->dispatch_script = object; } wsgi_python_required = 1; return NULL; } static const char *wsgi_set_pass_apache_request(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->pass_apache_request = 0; else if (strcasecmp(f, "On") == 0) dconfig->pass_apache_request = 1; else return "WSGIPassApacheRequest must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->pass_apache_request = 0; else if (strcasecmp(f, "On") == 0) sconfig->pass_apache_request = 1; else return "WSGIPassApacheRequest must be one of: Off | On"; } return NULL; } static const char *wsgi_set_pass_authorization(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->pass_authorization = 0; else if (strcasecmp(f, "On") == 0) dconfig->pass_authorization = 1; else return "WSGIPassAuthorization must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->pass_authorization = 0; else if (strcasecmp(f, "On") == 0) sconfig->pass_authorization = 1; else return "WSGIPassAuthorization must be one of: Off | On"; } return NULL; } static const char *wsgi_set_script_reloading(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->script_reloading = 0; else if (strcasecmp(f, "On") == 0) dconfig->script_reloading = 1; else return "WSGIScriptReloading must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->script_reloading = 0; else if (strcasecmp(f, "On") == 0) sconfig->script_reloading = 1; else return "WSGIScriptReloading must be one of: Off | On"; } return NULL; } static const char *wsgi_set_error_override(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) dconfig->error_override = 1; else return 
"WSGIErrorOverride must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) sconfig->error_override = 1; else return "WSGIErrorOverride must be one of: Off | On"; } return NULL; } static const char *wsgi_set_chunked_request(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->chunked_request = 0; else if (strcasecmp(f, "On") == 0) dconfig->chunked_request = 1; else return "WSGIChunkedRequest must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->chunked_request = 0; else if (strcasecmp(f, "On") == 0) sconfig->chunked_request = 1; else return "WSGIChunkedRequest must be one of: Off | On"; } return NULL; } static const char *wsgi_set_enable_sendfile(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->enable_sendfile = 0; else if (strcasecmp(f, "On") == 0) dconfig->enable_sendfile = 1; else return "WSGIEnableSendfile must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->enable_sendfile = 0; else if (strcasecmp(f, "On") == 0) sconfig->enable_sendfile = 1; else return "WSGIEnableSendfile must be one of: Off | On"; } return NULL; } static const char *wsgi_set_access_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of access script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI access script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI access script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->access_script = object; wsgi_python_required = 1; return NULL; } static const char *wsgi_set_auth_user_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of auth user script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI auth user script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI auth user script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->auth_user_script = object; wsgi_python_required = 1; return 
NULL; } static const char *wsgi_set_auth_group_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of auth group script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI auth group script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI auth group script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->auth_group_script = object; wsgi_python_required = 1; return NULL; } static const char *wsgi_set_user_authoritative(cmd_parms *cmd, void *mconfig, const char *f) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->user_authoritative = 0; else if (strcasecmp(f, "On") == 0) dconfig->user_authoritative = 1; else return "WSGIUserAuthoritative must be one of: Off | On"; return NULL; } static const char *wsgi_set_group_authoritative(cmd_parms *cmd, void *mconfig, const char *f) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->group_authoritative = 0; else if (strcasecmp(f, "On") == 0) dconfig->group_authoritative = 1; else return "WSGIGroupAuthoritative must be one of: Off | On"; return NULL; } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 static const char *wsgi_add_handler_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIServerConfig *sconfig = NULL; WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *name = NULL; const char *option = NULL; const char *value = NULL; name = ap_getword_conf(cmd->pool, &args); if (!name || !*name) return "Name for handler script not supplied."; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of handler script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI handler script definition."; } if (!strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; object->process_group = value; } else if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else if (!strcmp(option, "pass-authorization")) { if (!*value) return "Invalid value for authorization flag."; if (strcasecmp(value, "Off") == 0) object->pass_authorization = "0"; else if (strcasecmp(value, "On") == 0) object->pass_authorization = "1"; else return "Invalid value for authorization flag."; } else return "Invalid option to WSGI handler script definition."; } if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (!dconfig->handler_scripts) dconfig->handler_scripts = apr_hash_make(cmd->pool); apr_hash_set(dconfig->handler_scripts, name, APR_HASH_KEY_STRING, object); } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->handler_scripts) 
sconfig->handler_scripts = apr_hash_make(cmd->pool); apr_hash_set(sconfig->handler_scripts, name, APR_HASH_KEY_STRING, object); } return NULL; } static const char *wsgi_set_dont_write_bytecode(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->dont_write_bytecode = 0; else if (strcasecmp(f, "On") == 0) sconfig->dont_write_bytecode = 1; else return "WSGIDontWriteBytecode must be one of: Off | On"; return NULL; } #endif /* Handler for the translate name phase. */ static int wsgi_alias_matches(const char *uri, const char *alias_fakename) { /* Code for this function from Apache mod_alias module. */ const char *aliasp = alias_fakename, *urip = uri; while (*aliasp) { if (*aliasp == '/') { /* any number of '/' in the alias matches any number in * the supplied URI, but there must be at least one... */ if (*urip != '/') return 0; do { ++aliasp; } while (*aliasp == '/'); do { ++urip; } while (*urip == '/'); } else { /* Other characters are compared literally */ if (*urip++ != *aliasp++) return 0; } } /* Check last alias path component matched all the way */ if (aliasp[-1] != '/' && *urip != '\0' && *urip != '/') return 0; /* Return number of characters from URI which matched (may be * greater than length of alias, since we may have matched * doubled slashes) */ return urip - uri; } static int wsgi_hook_intercept(request_rec *r) { WSGIServerConfig *config = NULL; apr_array_header_t *aliases = NULL; WSGIAliasEntry *entries = NULL; WSGIAliasEntry *entry = NULL; ap_regmatch_t matches[AP_MAX_REG_MATCH]; const char *location = NULL; const char *application = NULL; int i = 0; config = ap_get_module_config(r->server->module_config, &wsgi_module); if (!config->alias_list) return DECLINED; if (r->uri[0] != '/' && r->uri[0]) return DECLINED; aliases = config->alias_list; entries = (WSGIAliasEntry *)aliases->elts; for (i = 0; i < aliases->nelts; ++i) { int l = 0; entry = &entries[i]; if (entry->regexp) { if (!ap_regexec(entry->regexp, r->uri, AP_MAX_REG_MATCH, matches, 0)) { if (entry->application) { l = matches[0].rm_eo; location = apr_pstrndup(r->pool, r->uri, l); application = ap_pregsub(r->pool, entry->application, r->uri, AP_MAX_REG_MATCH, matches); } } } else if (entry->location) { l = wsgi_alias_matches(r->uri, entry->location); location = entry->location; application = entry->application; } if (l > 0) { if (!strcmp(location, "/")) { r->filename = apr_pstrcat(r->pool, application, r->uri, NULL); } else { r->filename = apr_pstrcat(r->pool, application, r->uri + l, NULL); } r->handler = "wsgi-script"; apr_table_setn(r->notes, "alias-forced-type", r->handler); if (entry->process_group) { apr_table_setn(r->notes, "mod_wsgi.process_group", entry->process_group); } if (entry->application_group) { apr_table_setn(r->notes, "mod_wsgi.application_group", entry->application_group); } if (entry->callable_object) { apr_table_setn(r->notes, "mod_wsgi.callable_object", entry->callable_object); } if (entry->pass_authorization == 0) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "0"); else if (entry->pass_authorization == 1) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "1"); return OK; } } return DECLINED; } /* Handler for the response handler phase. 
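 *
 * wsgi_hook_intercept() above ran in the translate name phase: for
 * a matching alias it rewrote r->filename, set r->handler to
 * "wsgi-script" and stashed any per alias options in r->notes.
 * The handler code below runs later, in the response phase, and
 * picks those notes up again via wsgi_is_script_aliased() and
 * wsgi_hook_handler().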
*/ static void wsgi_log_script_error(request_rec *r, const char *e, const char *n) { char *message = NULL; if (!n) n = r->filename; message = apr_psprintf(r->pool, "%s: %s", e, n); ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "%s", message); } static void wsgi_build_environment(request_rec *r) { WSGIRequestConfig *config = NULL; const char *value = NULL; const char *script_name = NULL; const char *path_info = NULL; conn_rec *c = r->connection; /* Grab request configuration. */ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* Populate environment with standard CGI variables. */ ap_add_cgi_vars(r); ap_add_common_vars(r); /* * Mutate a HEAD request into a GET request. This is * required because WSGI specification doesn't lay out * clearly how WSGI applications should treat a HEAD * request. Generally authors of WSGI applications or * frameworks take it that they do not need to return any * content, but this screws up any Apache output filters * which need to see all the response content in order to * correctly set up response headers for a HEAD request such * that they are the same as a GET request. Thus change a * HEAD request into a GET request to ensure that request * content is generated. If using Apache 2.X we can skip * doing this if we know there is no output filter that * might change the content and/or headers. */ #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (r->method_number == M_GET && r->header_only && r->output_filters->frec->ftype < AP_FTYPE_PROTOCOL) apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET"); #else if (r->method_number == M_GET && r->header_only) apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET"); #endif /* Determine whether connection uses HTTPS protocol. */ #if AP_SERVER_MAJORVERSION_NUMBER >= 2 if (!wsgi_is_https) wsgi_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (wsgi_is_https && wsgi_is_https(r->connection)) apr_table_set(r->subprocess_env, "HTTPS", "1"); #endif /* * If enabled, pass along authorisation headers which Apache * leaves out of CGI environment. WSGI still needs to see * these if it needs to implement any of the standard * authentication schemes such as Basic and Digest. We do * not pass these through by default though as it can result * in passwords being leaked though to a WSGI application * when it shouldn't. This would be a problem where there is * some sort of site wide authorisation scheme in place * which has got nothing to do with specific applications. */ if (config->pass_authorization) { value = apr_table_get(r->headers_in, "Authorization"); if (value) apr_table_setn(r->subprocess_env, "HTTP_AUTHORIZATION", value); } /* If PATH_INFO not set, set it to an empty string. */ value = apr_table_get(r->subprocess_env, "PATH_INFO"); if (!value) apr_table_setn(r->subprocess_env, "PATH_INFO", ""); /* * Multiple slashes are not always collapsed into a single * slash in SCRIPT_NAME and PATH_INFO with Apache 1.3 and * Apache 2.X behaving a bit differently. Because some WSGI * applications don't deal with multiple slashes properly we * collapse any duplicate slashes to a single slash so * Apache behaviour is consistent across all versions. We * don't care that PATH_TRANSLATED can on Apache 1.3 still * contain multiple slashes as that should not be getting * used from a WSGI application anyway. 
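 *
 * For example, a SCRIPT_NAME of '//app//sub' is presented to the
 * application as '/app/sub' after the duplicate slashes have been
 * collapsed below, and likewise for PATH_INFO.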
*/ script_name = apr_table_get(r->subprocess_env, "SCRIPT_NAME"); if (*script_name) { while (*script_name && (*(script_name+1) == '/')) script_name++; script_name = apr_pstrdup(r->pool, script_name); ap_no2slash((char*)script_name); apr_table_setn(r->subprocess_env, "SCRIPT_NAME", script_name); } path_info = apr_table_get(r->subprocess_env, "PATH_INFO"); if (*path_info) { while (*path_info && (*(path_info+1) == '/')) path_info++; path_info = apr_pstrdup(r->pool, path_info); ap_no2slash((char*)path_info); apr_table_setn(r->subprocess_env, "PATH_INFO", path_info); } /* * Set values specific to mod_wsgi configuration. These control * aspects of how a request is managed but don't strictly need * to be passed through to the application itself. It is though * easier to set them here as then they are carried across to * the daemon process as part of the environment where they can * be extracted and used. */ apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); apr_table_setn(r->subprocess_env, "mod_wsgi.request_handler", r->handler); apr_table_setn(r->subprocess_env, "mod_wsgi.handler_script", config->handler_script); apr_table_setn(r->subprocess_env, "mod_wsgi.script_reloading", apr_psprintf(r->pool, "%d", config->script_reloading)); #if defined(MOD_WSGI_WITH_DAEMONS) apr_table_setn(r->subprocess_env, "mod_wsgi.listener_host", c->local_addr->hostname ? c->local_addr->hostname : ""); apr_table_setn(r->subprocess_env, "mod_wsgi.listener_port", apr_psprintf(r->pool, "%d", c->local_addr->port)); #endif apr_table_setn(r->subprocess_env, "mod_wsgi.input_chunked", apr_psprintf(r->pool, "%d", !!r->read_chunked)); apr_table_setn(r->subprocess_env, "mod_wsgi.enable_sendfile", apr_psprintf(r->pool, "%d", config->enable_sendfile)); #if defined(MOD_WSGI_WITH_DAEMONS) apr_table_setn(r->subprocess_env, "mod_wsgi.queue_start", apr_psprintf(r->pool, "%" APR_TIME_T_FMT, r->request_time)); #endif } typedef struct { PyObject_HEAD request_rec *r; WSGIRequestConfig *config; PyObject *log; } DispatchObject; static DispatchObject *newDispatchObject(request_rec *r, WSGIRequestConfig *config) { DispatchObject *self; self = PyObject_New(DispatchObject, &Dispatch_Type); if (self == NULL) return NULL; self->config = config; self->r = r; self->log = newLogObject(r, APLOG_ERR, NULL); return self; } static void Dispatch_dealloc(DispatchObject *self) { Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Dispatch_environ(DispatchObject *self, const char *group) { request_rec *r = NULL; PyObject *vars = NULL; PyObject *object = NULL; const apr_array_header_t *head = NULL; const apr_table_entry_t *elts = NULL; int i = 0; /* Create the WSGI environment dictionary. */ vars = PyDict_New(); /* Merge the CGI environment into the WSGI environment. 
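 *
 * Under Python 3 each value is decoded as latin-1 via the
 * PyUnicode_DecodeLatin1() calls below, matching the WSGI
 * convention of carrying bytes inside unicode strings, while
 * under Python 2 plain byte strings are used.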
*/ r = self->r; head = apr_table_elts(r->subprocess_env); elts = (apr_table_entry_t *)head->elts; for (i = 0; i < head->nelts; ++i) { if (elts[i].key) { if (elts[i].val) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(elts[i].val, strlen(elts[i].val), NULL); #else object = PyString_FromString(elts[i].val); #endif PyDict_SetItemString(vars, elts[i].key, object); Py_DECREF(object); } else PyDict_SetItemString(vars, elts[i].key, Py_None); } } /* * Need to override the process and application group as they * are set to the default target, whereas we need to set * them to the context the dispatch script is run in. Also need * to remove the callable object reference. */ #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString(""); #else object = PyString_FromString(""); #endif PyDict_SetItemString(vars, "mod_wsgi.process_group", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(group, strlen(group), NULL); #else object = PyString_FromString(group); #endif PyDict_SetItemString(vars, "mod_wsgi.application_group", object); Py_DECREF(object); PyDict_DelItemString(vars, "mod_wsgi.callable_object"); /* * Set up the log object for WSGI errors. Don't decrement the * reference to the log object as we keep a reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* * If Apache extensions are enabled, add a CObject reference * to the Apache request_rec structure instance. */ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2 object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use.
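 * The block immediately below is compiled out with '#if 0'; the
 * equivalent ssl_is_https() and ssl_var_lookup() hooks are instead
 * exposed as methods on the dispatch object via Dispatch_methods
 * further down.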
*/ #if 0 #if AP_SERVER_MAJORVERSION_NUMBER >= 2 object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); #endif #endif return vars; } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 static PyObject *Dispatch_ssl_is_https(DispatchObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Dispatch_ssl_var_lookup(DispatchObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); Py_DECREF(item); return NULL; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_DECREF(item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_XINCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); if (!value) { Py_XINCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } #endif static PyMethodDef Dispatch_methods[] = { #if AP_SERVER_MAJORVERSION_NUMBER >= 2 { "ssl_is_https", (PyCFunction)Dispatch_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Dispatch_ssl_var_lookup, METH_VARARGS, 0 }, #endif { NULL, NULL} }; static PyTypeObject Dispatch_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Dispatch", /*tp_name*/ sizeof(DispatchObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Dispatch_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Dispatch_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; static int wsgi_execute_dispatch(request_rec *r) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script = NULL; const char *group = 
NULL; int status; /* Grab request configuration. */ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); if (!config->dispatch_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI dispatch " "script not provided.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->dispatch_script->handler_script; group = wsgi_server_group(r, config->dispatch_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for dispatch scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume everything will be okay for now. */ status = OK; /* Determine if script exists and execute it. */ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; DispatchObject *adapter = NULL; module_dict = PyModule_GetDict(module); adapter = newDispatchObject(r, config); if (adapter) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *method = NULL; vars = Dispatch_environ(adapter, group); /* First check process_group(). 
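 *
 * A dispatch script may optionally define process_group(environ),
 * application_group(environ) and callable_object(environ). Each is
 * called in turn below with the environment dictionary built above
 * and, if it returns something other than None, the (byte) string
 * result overrides the corresponding request configuration and the
 * matching mod_wsgi.* subprocess environment entry.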
*/ #if defined(MOD_WSGI_WITH_DAEMONS) object = PyDict_GetItemString(module_dict, "process_group"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_process_group(r, s); config->process_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Process group must be " "a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_process_group(r, s); config->process_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); } } #endif else { PyErr_SetString(PyExc_TypeError, "Process " "group must be a byte string"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; } else Py_DECREF(object); object = NULL; } #endif /* Now check application_group(). */ if (status == OK) object = PyDict_GetItemString(module_dict, "application_group"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_application_group(r, s); config->application_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Application group must " "be a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_application_group(r, s); config->application_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); } } #endif else { PyErr_SetString(PyExc_TypeError, "Application " "group must be a string " "object"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; } else Py_DECREF(object); object = NULL; } /* Now check callable_object(). 
*/ if (status == OK) object = PyDict_GetItemString(module_dict, "callable_object"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_callable_object(r, s); config->callable_object = s; apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Callable object must " "be a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_callable_object(r, s); config->callable_object = s; apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); } } #endif else { PyErr_SetString(PyExc_TypeError, "Callable " "object must be a string " "object"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; } else Py_DECREF(object); object = NULL; } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); Py_DECREF(vars); } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static int wsgi_is_script_aliased(request_rec *r) { const char *t = NULL; t = apr_table_get(r->notes, "alias-forced-type"); return t && (!strcasecmp(t, "wsgi-script")); } #if defined(MOD_WSGI_WITH_DAEMONS) static int wsgi_execute_remote(request_rec *r); #endif static int wsgi_hook_handler(request_rec *r) { int status; apr_off_t limit = 0; WSGIRequestConfig *config = NULL; const char *value = NULL; /* Filter out the obvious case of no handler defined. */ if (!r->handler) return DECLINED; /* * Construct request configuration and cache it in the * request object against this module so can access it later * from handler code. */ config = wsgi_create_req_config(r->pool, r); ap_set_module_config(r->request_config, &wsgi_module, config); /* * Only process requests for this module. First check for * where target is the actual WSGI script. Then need to * check for the case where handler name mapped to a handler * script definition. */ if (!strcmp(r->handler, "wsgi-script") || !strcmp(r->handler, "application/x-httpd-wsgi")) { /* * Ensure that have adequate privileges to run the WSGI * script. Require ExecCGI to be specified in Options for * this. 
In doing this, using the wider interpretation that * ExecCGI refers to any executable like script even though * not a separate process execution. */ if (!(ap_allow_options(r) & OPT_EXECCGI) && !wsgi_is_script_aliased(r)) { wsgi_log_script_error(r, "Options ExecCGI is off in this " "directory", r->filename); return HTTP_FORBIDDEN; } /* Ensure target script exists and is a file. */ #if AP_SERVER_MAJORVERSION_NUMBER < 2 if (r->finfo.st_mode == 0) { wsgi_log_script_error(r, "Target WSGI script not found or unable " "to stat", r->filename); return HTTP_NOT_FOUND; } #else if (r->finfo.filetype == 0) { wsgi_log_script_error(r, "Target WSGI script not found or unable " "to stat", r->filename); return HTTP_NOT_FOUND; } #endif #if AP_SERVER_MAJORVERSION_NUMBER < 2 if (S_ISDIR(r->finfo.st_mode)) { wsgi_log_script_error(r, "Attempt to invoke directory as WSGI " "application", r->filename); return HTTP_FORBIDDEN; } #else if (r->finfo.filetype == APR_DIR) { wsgi_log_script_error(r, "Attempt to invoke directory as WSGI " "application", r->filename); return HTTP_FORBIDDEN; } #endif if (wsgi_is_script_aliased(r)) { /* * Allow any configuration supplied through request notes to * override respective values. Request notes are used when * configuration supplied with WSGIScriptAlias directives. */ if (value = apr_table_get(r->notes, "mod_wsgi.process_group")) config->process_group = wsgi_process_group(r, value); if (value = apr_table_get(r->notes, "mod_wsgi.application_group")) config->application_group = wsgi_application_group(r, value); if (value = apr_table_get(r->notes, "mod_wsgi.callable_object")) config->callable_object = value; if (value = apr_table_get(r->notes, "mod_wsgi.pass_authorization")) { if (!strcmp(value, "1")) config->pass_authorization = 1; else config->pass_authorization = 0; } } } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 else if (config->handler_scripts) { WSGIScriptFile *entry; entry = (WSGIScriptFile *)apr_hash_get(config->handler_scripts, r->handler, APR_HASH_KEY_STRING); if (entry) { config->handler_script = entry->handler_script; config->callable_object = "handle_request"; if (value = entry->process_group) config->process_group = wsgi_process_group(r, value); if (value = entry->application_group) config->application_group = wsgi_application_group(r, value); if (value = entry->pass_authorization) { if (!strcmp(value, "1")) config->pass_authorization = 1; else config->pass_authorization = 0; } } else return DECLINED; } #endif else return DECLINED; /* * For Apache 2.0+ honour AcceptPathInfo directive. Default * behaviour is accept additional path information. Under * Apache 1.3, WSGI application would need to check itself. */ #if AP_MODULE_MAGIC_AT_LEAST(20011212,0) if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) && r->path_info && *r->path_info) { wsgi_log_script_error(r, "AcceptPathInfo off disallows user's path", r->filename); return HTTP_NOT_FOUND; } #endif /* * Setup policy to apply if request contains a body. Note * that WSGI specification doesn't strictly allow for chunked * request content as CONTENT_LENGTH required when reading * input and application isn't meant to read more than what * is defined by CONTENT_LENGTH. To allow chunked request * content tell Apache to dechunk it. For application to use * the content, it has to ignore WSGI specification and use * read() with no arguments to read all available input, or * call read() with specific block size until read() returns * an empty string. 
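 *
 * As a purely illustrative sketch of the second approach (the block size
 * of 8192 is an arbitrary choice), the application keeps calling read()
 * until it gets back an empty string:
 *
 *   block = environ['wsgi.input'].read(8192)
 *   while block:
 *       ...
 *       block = environ['wsgi.input'].read(8192)
 *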
*/ if (config->chunked_request) status = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK); else status = ap_setup_client_block(r, REQUEST_CHUNKED_ERROR); if (status != OK) return status; /* * Check to see if request content is too large and end * request here. We do this as otherwise it will not be done * until first time input data is read in application. * Problem is that underlying HTTP output filter will * also generate a 413 response and the error raised from * the application will be appended to that. The call to * ap_discard_request_body() is hopefully enough to trigger * sending of the 413 response by the HTTP filter. */ limit = ap_get_limit_req_body(r); if (limit && limit < r->remaining) { ap_discard_request_body(r); return OK; } /* Build the sub process environment. */ wsgi_build_environment(r); /* * If a dispatch script has been provided, as appropriate * allow it to override any of the configuration related * to what context the script will be executed in and what * the target callable object for the application is. */ if (config->dispatch_script) { status = wsgi_execute_dispatch(r); if (status != OK) return status; } /* * Execute the target WSGI application script or proxy * request to one of the daemon processes as appropriate. */ #if defined(MOD_WSGI_WITH_DAEMONS) status = wsgi_execute_remote(r); if (status != DECLINED) return status; #endif #if defined(MOD_WSGI_DISABLE_EMBEDDED) wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled at compile " "time", r->filename); return HTTP_INTERNAL_SERVER_ERROR; #endif if (wsgi_server_config->restrict_embedded == 1) { wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled by " "runtime configuration", r->filename); return HTTP_INTERNAL_SERVER_ERROR; } return wsgi_execute_script(r); } #if AP_SERVER_MAJORVERSION_NUMBER < 2 /* * Apache 1.3 module initialisation functions. */ static void wsgi_hook_init(server_rec *s, apr_pool_t *p) { char package[128]; /* Setup module version information. */ sprintf(package, "mod_wsgi/%s", MOD_WSGI_VERSION_STRING); ap_add_version_component(package); /* Record Python version string with Apache. */ if (!Py_IsInitialized()) { char buffer[256]; const char *token = NULL; const char *version = NULL; version = Py_GetVersion(); token = version; while (*token && *token != ' ') token++; strcpy(buffer, "Python/"); strncat(buffer, version, token - version); ap_add_version_component(buffer); } /* Retain reference to base server. */ wsgi_server = s; /* Retain record of parent process ID. */ wsgi_parent_pid = getpid(); /* Determine whether multiprocess and/or multithreaded. */ wsgi_multiprocess = 1; wsgi_multithread = 0; /* Retain reference to main server config. */ wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module); /* * Check that the version of Python found at * runtime is what was used at compilation. */ wsgi_python_version(); /* * Initialise Python if required to be done in * the parent process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (!wsgi_python_after_fork) wsgi_python_init(p); } static void wsgi_hook_child_init(server_rec *s, apr_pool_t *p) { if (wsgi_python_required) { /* * Initialise Python if required to be done in * the child process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (wsgi_python_after_fork) wsgi_python_init(p); /* * Now perform additional initialisation steps * always done in child process. 
*/ wsgi_python_child_init(p); } } /* Dispatch list of content handlers */ static const handler_rec wsgi_handlers[] = { { "wsgi-script", wsgi_hook_handler }, { "application/x-httpd-wsgi", wsgi_hook_handler }, { NULL, NULL } }; static const command_rec wsgi_commands[] = { { "WSGIScriptAlias", wsgi_add_script_alias, NULL, RSRC_CONF, RAW_ARGS, "Map location to target WSGI script file." }, { "WSGIScriptAliasMatch", wsgi_add_script_alias, "*", RSRC_CONF, RAW_ARGS, "Map location to target WSGI script file." }, { "WSGIVerboseDebugging", wsgi_set_verbose_debugging, NULL, RSRC_CONF, TAKE1, "Enable/Disable verbose debugging messages." }, #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 { "WSGIPy3kWarningFlag", wsgi_set_py3k_warning_flag, NULL, RSRC_CONF, TAKE1, "Enable/Disable Python 3.0 warnings." }, { "WSGIDontWriteBytecode", wsgi_set_dont_write_bytecode, NULL, RSRC_CONF, TAKE1, "Enable/Disable writing of byte code." }, #endif { "WSGIPythonWarnings", wsgi_add_python_warnings, NULL, RSRC_CONF, TAKE1, "Control Python warning messages." }, { "WSGIPythonOptimize", wsgi_set_python_optimize, NULL, RSRC_CONF, TAKE1, "Set level of Python compiler optimisations." }, { "WSGIPythonHome", wsgi_set_python_home, NULL, RSRC_CONF, TAKE1, "Python prefix/exec_prefix absolute path names." }, { "WSGIPythonPath", wsgi_set_python_path, NULL, RSRC_CONF, TAKE1, "Python module search path." }, { "WSGIPythonEggs", wsgi_set_python_eggs, NULL, RSRC_CONF, TAKE1, "Python eggs cache directory." }, { "WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL, RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDIN." }, { "WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL, RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of STDOUT." }, { "WSGIRestrictSignal", wsgi_set_restrict_signal, NULL, RSRC_CONF, TAKE1, "Enable/Disable restrictions on use of signal()." }, { "WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL, RSRC_CONF, TAKE1, "Define whether file system is case sensitive." }, { "WSGIApplicationGroup", wsgi_set_application_group, NULL, ACCESS_CONF|RSRC_CONF, TAKE1, "Application interpreter group." }, { "WSGICallableObject", wsgi_set_callable_object, NULL, OR_FILEINFO, TAKE1, "Name of entry point in WSGI script file." }, { "WSGIImportScript", wsgi_add_import_script, NULL, RSRC_CONF, RAW_ARGS, "Location of WSGI import script." }, { "WSGIDispatchScript", wsgi_set_dispatch_script, NULL, ACCESS_CONF|RSRC_CONF, RAW_ARGS, "Location of WSGI dispatch script." }, { "WSGIPassAuthorization", wsgi_set_pass_authorization, NULL, OR_FILEINFO, TAKE1, "Enable/Disable WSGI authorization." }, { "WSGIScriptReloading", wsgi_set_script_reloading, NULL, OR_FILEINFO, TAKE1, "Enable/Disable script reloading mechanism." }, { "WSGIChunkedRequest", wsgi_set_chunked_request, NULL, OR_FILEINFO, TAKE1, "Enable/Disable support for chunked request." 
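/*
 * Illustrative only, not a configuration taken from this distribution
 * (the path and names below are hypothetical): a minimal setup using a
 * few of the directives registered above might look like:
 *
 *   WSGIScriptAlias /myapp /usr/local/wsgi/scripts/myapp.wsgi
 *   WSGIScriptReloading On
 *   WSGIPassAuthorization Off
 */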
}, { NULL } }; /* Dispatch list for API hooks */ module MODULE_VAR_EXPORT wsgi_module = { STANDARD_MODULE_STUFF, wsgi_hook_init, /* module initializer */ wsgi_create_dir_config, /* create per-dir config structures */ wsgi_merge_dir_config, /* merge per-dir config structures */ wsgi_create_server_config, /* create per-server config structures */ wsgi_merge_server_config, /* merge per-server config structures */ wsgi_commands, /* table of config file commands */ wsgi_handlers, /* [#8] MIME-typed-dispatched handlers */ wsgi_hook_intercept, /* [#1] URI to filename translation */ NULL, /* [#4] validate user id from request */ NULL, /* [#5] check if the user is ok _here_ */ NULL, /* [#3] check access by host address */ NULL, /* [#6] determine MIME type */ NULL, /* [#7] pre-run fixups */ NULL, /* [#9] log a transaction */ NULL, /* [#2] header parser */ wsgi_hook_child_init, /* child_init */ NULL, /* child_exit */ NULL /* [#0] post read-request */ #ifdef EAPI ,NULL, /* EAPI: add_module */ NULL, /* EAPI: remove_module */ NULL, /* EAPI: rewrite_command */ NULL /* EAPI: new_connection */ #endif }; #else /* * Apache 2.X and UNIX specific code for creation and management * of distinct daemon processes. */ #if defined(MOD_WSGI_WITH_DAEMONS) static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig, const char *args) { const char *name = NULL; const char *user = NULL; const char *group = NULL; int processes = 1; int multiprocess = 0; int threads = 15; int umask = -1; const char *root = NULL; const char *home = NULL; const char *lang = NULL; const char *locale = NULL; const char *python_home = NULL; const char *python_path = NULL; const char *python_eggs = NULL; int stack_size = 0; int maximum_requests = 0; int shutdown_timeout = 5; int deadlock_timeout = 300; int inactivity_timeout = 0; const char *display_name = NULL; int send_buffer_size = 0; int recv_buffer_size = 0; const char *script_user = NULL; const char *script_group = NULL; int cpu_time_limit = 0; int cpu_priority = 0; apr_int64_t memory_limit = 0; apr_int64_t virtual_memory_limit = 0; uid_t uid; uid_t gid; const char *groups_list = NULL; int groups_count = 0; gid_t *groups = NULL; const char *option = NULL; const char *value = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; /* * Set the defaults for user/group from values * defined for the User/Group directives in main * Apache configuration. */ uid = ap_unixd_config.user_id; user = ap_unixd_config.user_name; gid = ap_unixd_config.group_id; /* Now parse options for directive. 
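 *
 * For illustration only (the directive name and the values used are an
 * assumption based on the options parsed below, not taken from this
 * file), a definition such as:
 *
 *   WSGIDaemonProcess example-daemon processes=2 threads=15 maximum-requests=1000 display-name=%{GROUP}
 *
 * is read here as the daemon process group name followed by a series of
 * option=value pairs, each handled by the wsgi_parse_option() loop below.
 *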
*/ name = ap_getword_conf(cmd->pool, &args); if (!name || !*name) return "Name of WSGI daemon process not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI daemon process definition."; } if (!strcmp(option, "user")) { if (!*value) return "Invalid user for WSGI daemon process."; user = value; uid = ap_uname2id(user); if (uid == 0) return "WSGI process blocked from running as root."; if (*user == '#') { struct passwd *entry = NULL; if ((entry = getpwuid(uid)) == NULL) return "Couldn't determine user name from uid."; user = entry->pw_name; } } else if (!strcmp(option, "group")) { if (!*value) return "Invalid group for WSGI daemon process."; group = value; gid = ap_gname2id(group); } else if (!strcmp(option, "supplementary-groups")) { groups_list = value; } else if (!strcmp(option, "processes")) { if (!*value) return "Invalid process count for WSGI daemon process."; processes = atoi(value); if (processes < 1) return "Invalid process count for WSGI daemon process."; multiprocess = 1; } else if (!strcmp(option, "threads")) { if (!*value) return "Invalid thread count for WSGI daemon process."; threads = atoi(value); if (threads < 1 || threads >= WSGI_STACK_LAST-1) return "Invalid thread count for WSGI daemon process."; } else if (!strcmp(option, "umask")) { if (!*value) return "Invalid umask for WSGI daemon process."; errno = 0; umask = strtol(value, (char **)&value, 8); if (*value || errno == ERANGE || umask < 0) return "Invalid umask for WSGI daemon process."; } else if (!strcmp(option, "chroot")) { if (geteuid()) return "Cannot chroot WSGI daemon process when not root."; if (*value != '/') return "Invalid chroot directory for WSGI daemon process."; root = value; } else if (!strcmp(option, "home")) { if (*value != '/') return "Invalid home directory for WSGI daemon process."; home = value; } else if (!strcmp(option, "lang")) { lang = value; } else if (!strcmp(option, "locale")) { locale = value; } else if (!strcmp(option, "python-home")) { python_home = value; } else if (!strcmp(option, "python-path")) { python_path = value; } else if (!strcmp(option, "python-eggs")) { python_eggs = value; } #if (APR_MAJOR_VERSION >= 1) else if (!strcmp(option, "stack-size")) { if (!*value) return "Invalid stack size for WSGI daemon process."; stack_size = atoi(value); if (stack_size <= 0) return "Invalid stack size for WSGI daemon process."; } #endif else if (!strcmp(option, "maximum-requests")) { if (!*value) return "Invalid request count for WSGI daemon process."; maximum_requests = atoi(value); if (maximum_requests < 0) return "Invalid request count for WSGI daemon process."; } else if (!strcmp(option, "shutdown-timeout")) { if (!*value) return "Invalid shutdown timeout for WSGI daemon process."; shutdown_timeout = atoi(value); if (shutdown_timeout < 0) return "Invalid shutdown timeout for WSGI daemon process."; } else if (!strcmp(option, "deadlock-timeout")) { if (!*value) return "Invalid deadlock timeout for WSGI daemon process."; deadlock_timeout = atoi(value); if (deadlock_timeout < 0) return "Invalid deadlock timeout for WSGI daemon process."; } else if (!strcmp(option, "inactivity-timeout")) { if (!*value) return "Invalid inactivity timeout for WSGI daemon process."; inactivity_timeout = atoi(value); if (inactivity_timeout < 0) return "Invalid inactivity timeout for WSGI daemon process."; } else if (!strcmp(option, "display-name")) { display_name = value; } else if (!strcmp(option, "send-buffer-size")) { if (!*value) 
return "Invalid send buffer size for WSGI daemon process."; send_buffer_size = atoi(value); if (send_buffer_size < 512 && send_buffer_size != 0) { return "Send buffer size must be >= 512 bytes, " "or 0 for system default."; } } else if (!strcmp(option, "receive-buffer-size")) { if (!*value) return "Invalid receive buffer size for WSGI daemon process."; recv_buffer_size = atoi(value); if (recv_buffer_size < 512 && recv_buffer_size != 0) { return "Receive buffer size must be >= 512 bytes, " "or 0 for system default."; } } else if (!strcmp(option, "script-user")) { uid_t script_uid; if (!*value) return "Invalid script user for WSGI daemon process."; script_uid = ap_uname2id(value); if (*value == '#') { struct passwd *entry = NULL; if ((entry = getpwuid(script_uid)) == NULL) return "Couldn't determine uid from script user."; value = entry->pw_name; } script_user = value; } else if (!strcmp(option, "script-group")) { gid_t script_gid; if (!*value) return "Invalid script group for WSGI daemon process."; script_gid = ap_gname2id(value); if (*value == '#') { struct group *entry = NULL; if ((entry = getgrgid(script_gid)) == NULL) return "Couldn't determine gid from script group."; value = entry->gr_name; } script_group = value; } else if (!strcmp(option, "cpu-time-limit")) { if (!*value) return "Invalid CPU time limit for WSGI daemon process."; cpu_time_limit = atoi(value); if (cpu_time_limit < 0) return "Invalid CPU time limit for WSGI daemon process."; } else if (!strcmp(option, "cpu-priority")) { if (!*value) return "Invalid CPU priority for WSGI daemon process."; cpu_priority = atoi(value); } else if (!strcmp(option, "memory-limit")) { if (!*value) return "Invalid memory limit for WSGI daemon process."; memory_limit = apr_atoi64(value); if (memory_limit < 0) return "Invalid memory limit for WSGI daemon process."; } else if (!strcmp(option, "virtual-memory-limit")) { if (!*value) return "Invalid virtual memory limit for WSGI daemon process."; virtual_memory_limit = apr_atoi64(value); if (virtual_memory_limit < 0) return "Invalid virtual memory limit for WSGI daemon process."; } else return "Invalid option to WSGI daemon process definition."; } if (script_user && script_group) return "Only one of script-user and script-group allowed."; if (groups_list) { const char *group_name = NULL; int groups_maximum = NGROUPS_MAX; const char *items = NULL; #ifdef _SC_NGROUPS_MAX groups_maximum = sysconf(_SC_NGROUPS_MAX); if (groups_maximum < 0) groups_maximum = NGROUPS_MAX; #endif groups = (gid_t *)apr_pcalloc(cmd->pool, groups_maximum*sizeof(groups[0])); groups[groups_count++] = gid; items = groups_list; group_name = ap_getword(cmd->pool, &items, ','); while (group_name && *group_name) { if (groups_count > groups_maximum) return "Too many supplementary groups WSGI daemon process"; groups[groups_count++] = ap_gname2id(group_name); group_name = ap_getword(cmd->pool, &items, ','); } } if (!wsgi_daemon_list) { wsgi_daemon_list = apr_array_make(cmd->pool, 20, sizeof(WSGIProcessGroup)); } entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (!strcmp(entry->name, name)) return "Name duplicates previous WSGI daemon definition."; } wsgi_daemon_count++; entry = (WSGIProcessGroup *)apr_array_push(wsgi_daemon_list); entry->server = cmd->server; entry->random = random(); entry->id = wsgi_daemon_count; entry->name = apr_pstrdup(cmd->pool, name); entry->user = apr_pstrdup(cmd->pool, user); entry->group = apr_pstrdup(cmd->pool, group); entry->uid = 
uid; entry->gid = gid; entry->groups_list = groups_list; entry->groups_count = groups_count; entry->groups = groups; entry->processes = processes; entry->multiprocess = multiprocess; entry->threads = threads; entry->umask = umask; entry->root = root; entry->home = home; entry->lang = lang; entry->locale = locale; entry->python_home = python_home; entry->python_path = python_path; entry->python_eggs = python_eggs; entry->stack_size = stack_size; entry->maximum_requests = maximum_requests; entry->shutdown_timeout = shutdown_timeout; entry->deadlock_timeout = apr_time_from_sec(deadlock_timeout); entry->inactivity_timeout = apr_time_from_sec(inactivity_timeout); entry->display_name = display_name; entry->send_buffer_size = send_buffer_size; entry->recv_buffer_size = recv_buffer_size; entry->script_user = script_user; entry->script_group = script_group; entry->cpu_time_limit = cpu_time_limit; entry->cpu_priority = cpu_priority; entry->memory_limit = memory_limit; entry->virtual_memory_limit = virtual_memory_limit; entry->listener_fd = -1; return NULL; } static const char *wsgi_set_socket_prefix(cmd_parms *cmd, void *mconfig, const char *arg) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->socket_prefix = ap_server_root_relative(cmd->pool, arg); if (!sconfig->socket_prefix) { return apr_pstrcat(cmd->pool, "Invalid WSGISocketPrefix '", arg, "'.", NULL); } return NULL; } static const char wsgi_valid_accept_mutex_string[] = "Valid accept mutex mechanisms for this platform are: default" #if APR_HAS_FLOCK_SERIALIZE ", flock" #endif #if APR_HAS_FCNTL_SERIALIZE ", fcntl" #endif #if APR_HAS_SYSVSEM_SERIALIZE ", sysvsem" #endif #if APR_HAS_POSIXSEM_SERIALIZE ", posixsem" #endif #if APR_HAS_PROC_PTHREAD_SERIALIZE ", pthread" #endif "."; static const char *wsgi_set_accept_mutex(cmd_parms *cmd, void *mconfig, const char *arg) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); #if !defined(AP_ACCEPT_MUTEX_TYPE) sconfig->lock_mechanism = ap_accept_lock_mech; #else sconfig->lock_mechanism = APR_LOCK_DEFAULT; #endif if (!strcasecmp(arg, "default")) { sconfig->lock_mechanism = APR_LOCK_DEFAULT; } #if APR_HAS_FLOCK_SERIALIZE else if (!strcasecmp(arg, "flock")) { sconfig->lock_mechanism = APR_LOCK_FLOCK; } #endif #if APR_HAS_FCNTL_SERIALIZE else if (!strcasecmp(arg, "fcntl")) { sconfig->lock_mechanism = APR_LOCK_FCNTL; } #endif #if APR_HAS_SYSVSEM_SERIALIZE && !defined(PERCHILD_MPM) else if (!strcasecmp(arg, "sysvsem")) { sconfig->lock_mechanism = APR_LOCK_SYSVSEM; } #endif #if APR_HAS_POSIXSEM_SERIALIZE else if (!strcasecmp(arg, "posixsem")) { sconfig->lock_mechanism = APR_LOCK_POSIXSEM; } #endif #if APR_HAS_PROC_PTHREAD_SERIALIZE else if (!strcasecmp(arg, "pthread")) { sconfig->lock_mechanism = APR_LOCK_PROC_PTHREAD; } #endif else { return apr_pstrcat(cmd->pool, "Accept mutex lock mechanism '", arg, "' is invalid. 
", wsgi_valid_accept_mutex_string, NULL); } return NULL; } static apr_file_t *wsgi_signal_pipe_in = NULL; static apr_file_t *wsgi_signal_pipe_out = NULL; static int wsgi_cpu_time_limit_exceeded = 0; static void wsgi_signal_handler(int signum) { apr_size_t nbytes = 1; if (signum == SIGXCPU) wsgi_cpu_time_limit_exceeded = 1; apr_file_write(wsgi_signal_pipe_out, "X", &nbytes); apr_file_flush(wsgi_signal_pipe_out); wsgi_daemon_shutdown++; } static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon); static void wsgi_manage_process(int reason, void *data, apr_wait_t status) { WSGIDaemonProcess *daemon = data; switch (reason) { /* Child daemon process has died. */ case APR_OC_REASON_DEATH: { int mpm_state; int stopping; /* Stop watching the existing process. */ apr_proc_other_child_unregister(daemon); /* * Determine if Apache is being shutdown or not and * if it is not being shutdown, restart the child * daemon process that has died. If MPM doesn't * support query assume that child daemon process * shouldn't be restarted. Both prefork and worker * MPMs support this query so should always be okay. */ stopping = 1; if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state) == APR_SUCCESS && mpm_state != AP_MPMQ_STOPPING) { stopping = 0; } if (!stopping) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' has died, restarting.", daemon->process.pid, daemon->group->name); wsgi_start_process(wsgi_parent_pool, daemon); } break; } /* Apache is being restarted or shutdown. */ case APR_OC_REASON_RESTART: { /* Stop watching the existing process. */ apr_proc_other_child_unregister(daemon); break; } /* Child daemon process vanished. */ case APR_OC_REASON_LOST: { /* Stop watching the existing process. */ apr_proc_other_child_unregister(daemon); /* Restart the child daemon process that has died. */ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' has died, restarting.", daemon->process.pid, daemon->group->name); wsgi_start_process(wsgi_parent_pool, daemon); break; } /* Call to unregister the process. */ case APR_OC_REASON_UNREGISTER: { /* Nothing to do at present. */ break; } } } static void wsgi_setup_daemon_name(WSGIDaemonProcess *daemon, apr_pool_t *p) { const char *display_name = NULL; #if !(defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) int slen = 0; int dlen = 0; char *argv0 = NULL; #endif display_name = daemon->group->display_name; if (!display_name) return; if (!strcmp(display_name, "%{GROUP}")) { display_name = apr_pstrcat(p, "(wsgi:", daemon->group->name, ")", NULL); } /* * Only argv[0] is guaranteed to be the real things as MPM * modules may make modifications to subsequent arguments. * Thus can only replace the argv[0] value. Because length * is restricted, need to truncate display name if too long. */ #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) setproctitle("%s", display_name); #else argv0 = (char*)wsgi_server->process->argv[0]; dlen = strlen(argv0); slen = strlen(display_name); memset(argv0, ' ', dlen); if (slen < dlen) memcpy(argv0, display_name, slen); else memcpy(argv0, display_name, dlen); #endif } static void wsgi_setup_access(WSGIDaemonProcess *daemon) { /* Setup the umask for the effective user. */ if (daemon->group->umask != -1) umask(daemon->group->umask); /* Change to chroot environment. 
*/ if (daemon->group->root) { if (chroot(daemon->group->root) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to change root " "directory to '%s'.", getpid(), daemon->group->root); } } /* Setup the working directory.*/ if (daemon->group->home) { if (chdir(daemon->group->home) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to change working " "directory to '%s'.", getpid(), daemon->group->home); } } else if (geteuid()) { struct passwd *pwent; pwent = getpwuid(geteuid()); if (pwent) { if (chdir(pwent->pw_dir) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to change working " "directory to '%s'.", getpid(), pwent->pw_dir); } } else { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to determine home " "directory for uid=%ld.", getpid(), (long)geteuid()); } } else { struct passwd *pwent; pwent = getpwuid(daemon->group->uid); if (pwent) { if (chdir(pwent->pw_dir) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to change working " "directory to '%s'.", getpid(), pwent->pw_dir); } } else { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to determine home " "directory for uid=%ld.", getpid(), (long)daemon->group->uid); } } /* Don't bother switch user/group if not root. */ if (geteuid()) return; /* Setup the daemon process real and effective group. */ if (setgid(daemon->group->gid) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to set group id to gid=%u.", getpid(), (unsigned)daemon->group->gid); } else { if (daemon->group->groups) { if (setgroups(daemon->group->groups_count, daemon->group->groups) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable " "to set supplementary groups for uname=%s " "of '%s'.", getpid(), daemon->group->user, daemon->group->groups_list); } } else if (initgroups(daemon->group->user, daemon->group->gid) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable " "to set groups for uname=%s and gid=%u.", getpid(), daemon->group->user, (unsigned)daemon->group->gid); } } /* Setup the daemon process real and effective user. 
*/ if (setuid(daemon->group->uid) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Unable to change to uid=%ld.", getpid(), (long)daemon->group->uid); } } static int wsgi_setup_socket(WSGIProcessGroup *process) { int sockfd = -1; struct sockaddr_un addr; mode_t omask; int rc; int sendsz = process->send_buffer_size; int recvsz = process->recv_buffer_size; ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Socket for '%s' is '%s'.", getpid(), process->name, process->socket); if ((sockfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't create unix domain " "socket.", getpid()); return -1; } #ifdef SO_SNDBUF if (sendsz) { if (setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, (void *)&sendsz, sizeof(sendsz)) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server, "mod_wsgi (pid=%d): Failed to set send buffer " "size on daemon process socket.", getpid()); } } #endif #ifdef SO_RCVBUF if (recvsz) { if (setsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (void *)&recvsz, sizeof(recvsz)) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server, "mod_wsgi (pid=%d): Failed to set receive buffer " "size on daemon process socket.", getpid()); } } #endif memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; apr_cpystrn(addr.sun_path, process->socket, sizeof(addr.sun_path)); omask = umask(0077); rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)); if (rc < 0 && errno == EADDRINUSE) { ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(errno), wsgi_server, "mod_wsgi (pid=%d): Removing stale unix domain " "socket '%s'.", getpid(), process->socket); unlink(process->socket); rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)); } umask(omask); if (rc < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't bind unix domain " "socket '%s'.", getpid(), process->socket); return -1; } if (listen(sockfd, WSGI_LISTEN_BACKLOG) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't listen on unix domain " "socket.", getpid()); return -1; } /* * Set the ownership of the UNIX listener socket. This would * normally be the Apache user that the Apache server child * processes run as, as they are the only processes that * would connect to the sockets. In the case of ITK MPM, * having them owned by Apache user is useless as at the * time the request is to be proxied, the Apache server * child process will have uid corresponding to the user * whose request they are handling. For ITK, thus set the * ownership to be the same as the daemon processes. This is * still restrictive, in that can only connect to daemon * process group running under same user, but most of the * time that is what you would want anyway when using ITK * MPM. */ if (!geteuid()) { #if defined(MPM_ITK) if (chown(process->socket, process->uid, -1) < 0) { #else if (chown(process->socket, ap_unixd_config.user_id, -1) < 0) { #endif ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't change owner of unix " "domain socket '%s'.", getpid(), process->socket); return -1; } } return sockfd; } static int wsgi_hook_daemon_handler(conn_rec *c); static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock, apr_bucket_alloc_t *bucket_alloc, WSGIDaemonProcess *daemon) { apr_status_t rv; conn_rec *c; ap_sb_handle_t *sbh; core_net_rec *net; /* * This duplicates Apache connection setup. 
This is done * here rather than letting Apache do it so that avoid the * possibility that any Apache modules, such as mod_ssl * will add their own input/output filters to the chain. */ ap_create_sb_handle(&sbh, p, -1, 0); c = (conn_rec *)apr_pcalloc(p, sizeof(conn_rec)); c->sbh = sbh; c->conn_config = ap_create_conn_config(p); c->notes = apr_table_make(p, 5); c->pool = p; if ((rv = apr_socket_addr_get(&c->local_addr, APR_LOCAL, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(rv), wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_LOCAL).", getpid()); apr_socket_close(sock); return; } apr_sockaddr_ip_get(&c->local_ip, c->local_addr); #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if ((rv = apr_socket_addr_get(&c->client_addr, APR_REMOTE, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_REMOTE).", getpid()); apr_socket_close(sock); return; } c->client_ip = "unknown"; #else if ((rv = apr_socket_addr_get(&c->remote_addr, APR_REMOTE, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(rv), wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_REMOTE).", getpid()); apr_socket_close(sock); return; } c->remote_ip = "unknown"; #endif c->base_server = daemon->group->server; c->bucket_alloc = bucket_alloc; c->id = 1; net = apr_palloc(c->pool, sizeof(core_net_rec)); rv = apr_socket_timeout_set(sock, c->base_server->timeout); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(rv), wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_timeout_set().", getpid()); } net->c = c; net->in_ctx = NULL; net->out_ctx = NULL; net->client_socket = sock; ap_set_module_config(net->c->conn_config, &core_module, sock); ap_add_input_filter_handle(ap_core_input_filter_handle, net, NULL, net->c); ap_add_output_filter_handle(ap_core_output_filter_handle, net, NULL, net->c); wsgi_hook_daemon_handler(c); ap_lingering_close(c); } static apr_status_t wsgi_worker_acquire(int id) { WSGIThreadStack *stack = wsgi_worker_stack; WSGIDaemonThread *thread = &wsgi_worker_threads[id]; while (1) { apr_uint32_t state = stack->state; if (state & (WSGI_STACK_TERMINATED | WSGI_STACK_NO_LISTENER)) { if (state & WSGI_STACK_TERMINATED) { return APR_EINVAL; } if (apr_atomic_cas32(&(stack->state), WSGI_STACK_LAST, state) != state) { continue; } else { return APR_SUCCESS; } } thread->next = state; if (apr_atomic_cas32(&(stack->state), (unsigned)id, state) != state) { continue; } else { apr_status_t rv; if (thread->wakeup) { thread->wakeup = 0; return APR_SUCCESS; } rv = apr_thread_cond_wait(thread->condition, thread->mutex); while (rv == APR_SUCCESS && !thread->wakeup) rv = apr_thread_cond_wait(thread->condition, thread->mutex); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): " "Wait on thread %d wakeup condition variable " "failed.", getpid(), id); } thread->wakeup = 0; return rv; } } } static apr_status_t wsgi_worker_release() { WSGIThreadStack *stack = wsgi_worker_stack; while (1) { apr_uint32_t state = stack->state; unsigned int first = state & WSGI_STACK_HEAD; if (first == WSGI_STACK_LAST) { if (apr_atomic_cas32(&(stack->state), state | WSGI_STACK_NO_LISTENER, state) != state) { continue; } else { return APR_SUCCESS; } } else { WSGIDaemonThread *thread = &wsgi_worker_threads[first]; if (apr_atomic_cas32(&(stack->state), (state ^ first) | thread->next, state) != state) { continue; } else { /* * Flag that thread should be 
woken up and then * signal it via the condition variable. */ apr_status_t rv; if ((rv = apr_thread_mutex_lock(thread->mutex)) != APR_SUCCESS) { return rv; } thread->wakeup = 1; if ((rv = apr_thread_mutex_unlock(thread->mutex)) != APR_SUCCESS) { return rv; } return apr_thread_cond_signal(thread->condition); } } } } static apr_status_t wsgi_worker_shutdown() { int i; apr_status_t rv; WSGIThreadStack *stack = wsgi_worker_stack; while (1) { apr_uint32_t state = stack->state; if (apr_atomic_cas32(&(stack->state), state | WSGI_STACK_TERMINATED, state) == state) { break; } } for (i = 0; i < wsgi_daemon_process->group->threads; i++) { if ((rv = wsgi_worker_release()) != APR_SUCCESS) { return rv; } } return APR_SUCCESS; } static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread) { apr_status_t status; apr_socket_t *socket; apr_pool_t *ptrans; apr_pollset_t *pollset; apr_pollfd_t pfd = { 0 }; apr_int32_t numdesc; const apr_pollfd_t *pdesc; apr_bucket_alloc_t *bucket_alloc; WSGIDaemonProcess *daemon = thread->process; WSGIProcessGroup *group = daemon->group; /* Loop until signal received to shutdown daemon process. */ while (!wsgi_daemon_shutdown) { apr_status_t rv; apr_time_t start; apr_time_t duration; /* * Only allow one thread in this process to attempt to * acquire the global process lock as the global process * lock will actually allow all threads in this process * through once one in this process acquires lock. Only * allowing one means better chance of another process * subsequently getting it thereby distributing requests * across processes better and reducing chance of Python * GIL contention. */ wsgi_worker_acquire(thread->id); if (wsgi_daemon_shutdown) break; if (group->mutex) { /* * Grab the accept mutex across all daemon processes * in this process group. */ rv = apr_proc_mutex_lock(group->mutex); if (rv != APR_SUCCESS) { #if defined(EIDRM) /* * When using multiple threads locking the * process accept mutex fails with an EIDRM when * process being shutdown but signal check * hasn't triggered quick enough to set shutdown * flag. This causes lots of error messages to * be logged which make it look like something * nasty has happened even when it hasn't. For * now assume that if multiple threads and EIDRM * occurs that it is okay and the process is * being shutdown. The condition should by * rights only occur when the Apache parent * process is being shutdown or has died for * some reason so daemon process would logically * therefore also be in process of being * shutdown or killed. */ if (!strcmp(apr_proc_mutex_name(group->mutex), "sysvsem")) { if (errno == EIDRM && group->threads > 1) wsgi_daemon_shutdown = 1; } #endif if (!wsgi_daemon_shutdown) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't acquire accept mutex '%s'. " "Shutting down daemon process.", getpid(), group->socket); kill(getpid(), SIGTERM); sleep(5); } break; } /* * Daemon process being shutdown so don't accept the * connection after all. */ if (wsgi_daemon_shutdown) { apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); break; } } apr_pool_create(&ptrans, p); /* * Accept socket connection from the child process. We * test the socket for whether it is ready before actually * performing the accept() so that can know for sure that * we will be processing a request and flag thread as * running. Only bother to do join with thread which is * actually running when process is being shutdown. 
*/ apr_pollset_create(&pollset, 1, ptrans, 0); memset(&pfd, '\0', sizeof(pfd)); pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = daemon->listener; pfd.reqevents = APR_POLLIN; pfd.client_data = daemon; apr_pollset_add(pollset, &pfd); rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc); if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): " "Unable to poll daemon socket for '%s'. " "Shutting down daemon process.", getpid(), group->socket); wsgi_daemon_shutdown++; kill(getpid(), SIGTERM); sleep(5); break; } if (wsgi_daemon_shutdown) { if (group->mutex) apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); apr_pool_destroy(ptrans); break; } if (rv != APR_SUCCESS && APR_STATUS_IS_EINTR(rv)) { if (group->mutex) apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); apr_pool_destroy(ptrans); continue; } thread->running = 1; status = apr_socket_accept(&socket, daemon->listener, ptrans); if (group->mutex) { apr_status_t rv; rv = apr_proc_mutex_unlock(group->mutex); if (rv != APR_SUCCESS) { if (!wsgi_daemon_shutdown) { wsgi_worker_release(); ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't release accept mutex '%s'.", getpid(), group->socket); apr_pool_destroy(ptrans); thread->running = 0; break; } } } wsgi_worker_release(); if (status != APR_SUCCESS && APR_STATUS_IS_EINTR(status)) { apr_pool_destroy(ptrans); thread->running = 0; continue; } /* Process the request proxied from the child process. */ bucket_alloc = apr_bucket_alloc_create(ptrans); wsgi_process_socket(ptrans, socket, bucket_alloc, daemon); /* Cleanup ready for next request. */ apr_pool_destroy(ptrans); thread->running = 0; /* Check to see if maximum number of requests reached. 
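 *
 * The counter was seeded from the maximum-requests option in
 * wsgi_daemon_main() and is decremented here after each proxied request.
 * When it reaches zero the process signals itself with SIGINT so that it
 * shuts down gracefully and is then restarted by the Apache parent (see
 * wsgi_manage_process()). For example, maximum-requests=1000 would, very
 * roughly since the counter is shared by all worker threads, recycle the
 * daemon process after about a thousand requests.
 *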
*/ if (daemon->group->maximum_requests) { if (--wsgi_request_count <= 0) { if (!wsgi_daemon_shutdown) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Maximum requests " "reached '%s'.", getpid(), daemon->group->name); } wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } } } } static void *wsgi_daemon_thread(apr_thread_t *thd, void *data) { WSGIDaemonThread *thread = data; apr_pool_t *p = apr_thread_pool_get(thd); apr_thread_mutex_lock(thread->mutex); wsgi_daemon_worker(p, thread); apr_thread_exit(thd, APR_SUCCESS); return NULL; } static void *wsgi_reaper_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; sleep(daemon->group->shutdown_timeout); ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Aborting process '%s'.", getpid(), daemon->group->name); exit(-1); return NULL; } static void *wsgi_deadlock_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; PyGILState_STATE gilstate; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Enable deadlock thread in " "process '%s'.", getpid(), daemon->group->name); } apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_deadlock_shutdown_time = apr_time_now(); wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout; apr_thread_mutex_unlock(wsgi_shutdown_lock); while (1) { apr_sleep(apr_time_from_sec(1)); gilstate = PyGILState_Ensure(); PyGILState_Release(gilstate); apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_deadlock_shutdown_time = apr_time_now(); wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout; apr_thread_mutex_unlock(wsgi_shutdown_lock); } return NULL; } static void *wsgi_monitor_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; int restart = 0; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Enable monitor thread in " "process '%s'.", getpid(), daemon->group->name); ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Deadlock timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_deadlock_timeout))); ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Inactivity timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_inactivity_timeout))); } while (1) { apr_time_t now; apr_time_t deadlock_time; apr_time_t inactivity_time; apr_interval_time_t period = 0; now = apr_time_now(); apr_thread_mutex_lock(wsgi_shutdown_lock); deadlock_time = wsgi_deadlock_shutdown_time; inactivity_time = wsgi_inactivity_shutdown_time; apr_thread_mutex_unlock(wsgi_shutdown_lock); if (!restart && wsgi_deadlock_timeout) { if (deadlock_time) { if (deadlock_time <= now) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Daemon process deadlock " "timer expired, stopping process '%s'.", getpid(), daemon->group->name); restart = 1; } else { period = deadlock_time - now; } } else { period = wsgi_deadlock_timeout; } } if (!restart && wsgi_inactivity_timeout) { if (inactivity_time) { if (inactivity_time <= now) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Daemon process " "inactivity timer expired, stopping " "process '%s'.", getpid(), daemon->group->name); restart = 1; } else { if (!period || ((inactivity_time - now) < period)) period = inactivity_time - now; } } else { if (!period || (wsgi_inactivity_timeout < period)) period = wsgi_inactivity_timeout; } } if (restart) { wsgi_daemon_shutdown++; kill(getpid(), SIGINT); 
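/*
 * The SIGINT sent here is fielded by wsgi_signal_handler(), which
 * writes to the internal signal pipe and flags wsgi_daemon_shutdown;
 * the main thread blocked in apr_poll() within wsgi_daemon_main() then
 * wakes up and runs the graceful shutdown sequence, after which the
 * Apache parent process restarts the daemon process.
 */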
} if (restart || period <= 0) period = apr_time_from_sec(1); apr_sleep(period); } return NULL; } static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon) { apr_threadattr_t *thread_attr; apr_thread_t *reaper = NULL; int i; apr_status_t rv; apr_status_t thread_rv; apr_pollfd_t poll_fd; apr_int32_t poll_count = 0; /* * Setup poll object for listening for shutdown notice from * signal handler. */ poll_fd.desc_type = APR_POLL_FILE; poll_fd.reqevents = APR_POLLIN; poll_fd.desc.f = wsgi_signal_pipe_in; /* Initialise maximum request count for daemon. */ if (daemon->group->maximum_requests) wsgi_request_count = daemon->group->maximum_requests; /* Ensure that threads are joinable. */ apr_threadattr_create(&thread_attr, p); apr_threadattr_detach_set(thread_attr, 0); #if (APR_MAJOR_VERSION >= 1) if (daemon->group->stack_size) { apr_threadattr_stacksize_set(thread_attr, daemon->group->stack_size); } #endif /* Start monitoring thread if required. */ wsgi_deadlock_timeout = daemon->group->deadlock_timeout; wsgi_inactivity_timeout = daemon->group->inactivity_timeout; if (wsgi_deadlock_timeout || wsgi_inactivity_timeout) { apr_thread_mutex_create(&wsgi_shutdown_lock, APR_THREAD_MUTEX_UNNESTED, p); rv = apr_thread_create(&reaper, thread_attr, wsgi_monitor_thread, daemon, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create monitor " "thread in daemon process '%s'.", getpid(), daemon->group->name); } } if (wsgi_deadlock_timeout) { rv = apr_thread_create(&reaper, thread_attr, wsgi_deadlock_thread, daemon, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create deadlock " "thread in daemon process '%s'.", getpid(), daemon->group->name); } } /* Initialise worker stack. */ wsgi_worker_stack = (WSGIThreadStack *)apr_palloc(p, sizeof(WSGIThreadStack)); wsgi_worker_stack->state = WSGI_STACK_NO_LISTENER | WSGI_STACK_LAST; /* Start the required number of threads. */ wsgi_worker_threads = (WSGIDaemonThread *)apr_pcalloc(p, daemon->group->threads * sizeof(WSGIDaemonThread)); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Starting %d threads in daemon " "process '%s'.", getpid(), daemon->group->threads, daemon->group->name); } for (i=0; i<daemon->group->threads; i++) { WSGIDaemonThread *thread = &wsgi_worker_threads[i]; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Starting thread %d in daemon " "process '%s'.", getpid(), i+1, daemon->group->name); } /* Create the mutex and condition variable for this thread. */ rv = apr_thread_cond_create(&thread->condition, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d state condition variable in daemon " "process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads. */ kill(getpid(), SIGTERM); sleep(5); } rv = apr_thread_mutex_create(&thread->mutex, APR_THREAD_MUTEX_DEFAULT, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d state mutex variable in daemon " "process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads.
*/ kill(getpid(), SIGTERM); sleep(5); } /* Now create the actual thread. */ thread->id = i; thread->process = daemon; thread->running = 0; rv = apr_thread_create(&thread->thread, thread_attr, wsgi_daemon_thread, thread, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d in daemon process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads. */ kill(getpid(), SIGTERM); sleep(5); } } /* Block until we get a process shutdown signal. */ do { rv = apr_poll(&poll_fd, 1, &poll_count, -1); } while (APR_STATUS_IS_EINTR(rv)); if (wsgi_cpu_time_limit_exceeded) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Exceeded CPU time limit '%s'.", getpid(), daemon->group->name); } ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Shutdown requested '%s'.", getpid(), daemon->group->name); /* * Create a reaper thread to abort process if graceful * shutdown takes too long. Not recommended to disable * this unless external process is controlling shutdown. */ if (daemon->group->shutdown_timeout) { rv = apr_thread_create(&reaper, thread_attr, wsgi_reaper_thread, daemon, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't create reaper " "thread in daemon process '%s'.", getpid(), daemon->group->name); } } /* * Attempt a graceful shutdown by waiting for any * threads which were processing a request at the time * of shutdown. In some respects this is a bit pointless * as even though we allow the requests to be completed, * the Apache child process which proxied the request * through to this daemon process could get killed off * before the daemon process and so the response gets * cut off or lost. */ wsgi_worker_shutdown(); for (i=0; i<daemon->group->threads; i++) { if (wsgi_worker_threads[i].thread && wsgi_worker_threads[i].running) { rv = apr_thread_join(&thread_rv, wsgi_worker_threads[i].thread); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): Couldn't join with " "worker thread %d in daemon process '%s'.", getpid(), i, daemon->group->name); } } } } static apr_status_t wsgi_cleanup_process(void *data) { WSGIProcessGroup *group = (WSGIProcessGroup *)data; /* Only do cleanup if in Apache parent process.
*/ if (wsgi_parent_pid != getpid()) return APR_SUCCESS; if (group->listener_fd != -1) { if (close(group->listener_fd) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't close unix domain socket '%s'.", getpid(), group->socket); } if (unlink(group->socket) < 0 && errno != ENOENT) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't unlink unix domain socket '%s'.", getpid(), group->socket); } } return APR_SUCCESS; } static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon) { apr_status_t status; ap_listen_rec *lr; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i = 0; if ((status = apr_proc_fork(&daemon->process, p)) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(errno), wsgi_server, "mod_wsgi: Couldn't spawn process '%s'.", daemon->group->name); return DECLINED; } else if (status == APR_INCHILD) { if (!geteuid()) { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Starting process '%s' with " "uid=%ld, gid=%u and threads=%d.", getpid(), daemon->group->name, (long)daemon->group->uid, (unsigned)daemon->group->gid, daemon->group->threads); } else { ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Starting process '%s' with " "threads=%d.", getpid(), daemon->group->name, daemon->group->threads); } #ifdef HAVE_BINDPROCESSOR /* * By default, AIX binds to a single processor. This * bit unbinds children which will then bind to another * CPU. */ status = bindprocessor(BINDPROCESS, (int)getpid(), PROCESSOR_CLASS_ANY); if (status != OK) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server, "mod_wsgi (pid=%d): Failed to unbind processor.", getpid()); } #endif /* Setup daemon process name displayed by 'ps'. */ wsgi_setup_daemon_name(daemon, p); /* Adjust CPU priority if overridden. */ if (daemon->group->cpu_priority != 0) { if (setpriority(PRIO_PROCESS, 0, daemon->group->cpu_priority) == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't set CPU priority " "in daemon process '%d'.", getpid(), daemon->group->cpu_priority); } } /* Setup daemon process user/group/umask etc. */ wsgi_setup_access(daemon); /* Reinitialise accept mutex in daemon process. */ if (daemon->group->mutex) { status = apr_proc_mutex_child_init(&daemon->group->mutex, daemon->group->mutex_path, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Couldn't intialise accept " "mutex in daemon process '%s'.", getpid(), daemon->group->mutex_path); /* Don't die immediately to avoid a fork bomb. */ sleep(20); exit(-1); } } /* * Create a lookup table of listener socket address * details so can use it later in daemon when trying * to map request to correct virtual host server. */ wsgi_daemon_listeners = apr_hash_make(p); for (lr = ap_listeners; lr; lr = lr->next) { char *key; char *host; apr_port_t port; host = lr->bind_addr->hostname; port = lr->bind_addr->port; if (!host) host = ""; key = apr_psprintf(p, "%s|%d", host, port); apr_hash_set(wsgi_daemon_listeners, key, APR_HASH_KEY_STRING, lr->bind_addr); } /* * Close child copy of the listening sockets for the * Apache parent process so we don't interfere with * the parent process. */ ap_close_listeners(); /* * Cleanup the Apache scoreboard to ensure that any * shared memory segments or memory mapped files not * available to code in daemon processes. 
*/ /* * XXX If this is closed, under Apache 2.4 then daemon * mode processes will crash. Not much choice but to * leave it open. Daemon mode really needs to be * rewritten not to use normal Apache request object and * output bucket chain to avoid potential for problems. */ #if 0 ap_cleanup_scoreboard(0); #endif /* * Wipe out random value used in magic token so that not * possible for user code running in daemon process to * discover this value for other daemon process groups. * In other words, wipe out all but our own. */ entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (entry != daemon->group) entry->random = 0; } /* * Close listener socket for daemon processes for other * daemon process groups. In other words, close all but * our own. */ entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (entry != daemon->group && entry->listener_fd != -1) { close(entry->listener_fd); entry->listener_fd = -1; } } /* * Register signal handler to receive shutdown signal * from Apache parent process. We need to first create * pipe by which signal handler can notify the main * thread that signal has arrived indicating that * process needs to shutdown. */ status = apr_file_pipe_create(&wsgi_signal_pipe_in, &wsgi_signal_pipe_out, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_EMERG(status), wsgi_server, "mod_wsgi (pid=%d): Couldn't initialise signal " "pipe in daemon process '%s'.", getpid(), daemon->group->name); sleep(20); exit(-1); } wsgi_daemon_shutdown = 0; apr_signal(SIGINT, wsgi_signal_handler); apr_signal(SIGTERM, wsgi_signal_handler); #ifdef SIGXCPU apr_signal(SIGXCPU, wsgi_signal_handler); #endif /* Set limits on amount of CPU time that can be used. */ if (daemon->group->cpu_time_limit > 0) { struct rlimit limit; int result = -1; limit.rlim_cur = daemon->group->cpu_time_limit; limit.rlim_max = daemon->group->cpu_time_limit + 1; limit.rlim_max += daemon->group->shutdown_timeout; #if defined(RLIMIT_CPU) result = setrlimit(RLIMIT_CPU, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Couldn't set CPU time " "limit of %d seconds for process '%s'.", getpid(), daemon->group->cpu_time_limit, daemon->group->name); } } /* * Set limits on amount of date segment memory that can * be used. Although this is done, some platforms * doesn't actually support it. */ if (daemon->group->memory_limit > 0) { struct rlimit limit; int result = -1; limit.rlim_cur = daemon->group->memory_limit; limit.rlim_max = daemon->group->memory_limit; #if defined(RLIMIT_DATA) result = setrlimit(RLIMIT_DATA, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Couldn't set memory time " "limit of %ld for process '%s'.", getpid(), (long)daemon->group->memory_limit, daemon->group->name); } } /* * Set limits on amount of virtual memory that can be used. * Although this is done, some platforms doesn't actually * support it. 
*/ if (daemon->group->virtual_memory_limit > 0) { struct rlimit limit; int result = -1; limit.rlim_cur = daemon->group->virtual_memory_limit; limit.rlim_max = daemon->group->virtual_memory_limit; #if defined(RLIMIT_AS) result = setrlimit(RLIMIT_AS, &limit); #elif defined(RLIMIT_VMEM) result = setrlimit(RLIMIT_VMEM, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(0), wsgi_server, "mod_wsgi (pid=%d): Couldn't set virtual memory " "time limit of %ld for process '%s'.", getpid(), (long)daemon->group->virtual_memory_limit, daemon->group->name); } } /* * Flag whether multiple daemon processes or denoted * that requests could be spread across multiple daemon * process groups. */ wsgi_multiprocess = daemon->group->multiprocess; wsgi_multithread = daemon->group->threads != 1; /* * Create a pool for the child daemon process so * we can trigger various events off it at shutdown. */ apr_pool_create(&wsgi_daemon_pool, p); /* * Retain a reference to daemon process details. Do * this here as when doing lazy initialisation of * the interpreter we want to know if in a daemon * process so can pick any daemon process specific * home directory for Python installation. */ wsgi_daemon_group = daemon->group->name; wsgi_daemon_process = daemon; /* Set lang/locale if specified for daemon process. */ if (daemon->group->lang) { char *envvar = apr_pstrcat(p, "LANG=", daemon->group->lang, NULL); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Setting lang to %s.", getpid(), daemon->group->lang); putenv(envvar); } if (daemon->group->locale) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Setting locale to %s.", getpid(), daemon->group->locale); setlocale(LC_ALL, daemon->group->locale); } /* * Initialise Python if required to be done in the child * process. Note that it will not be initialised if * mod_python loaded and it has already been done. */ if (wsgi_python_after_fork) wsgi_python_init(p); #if PY_MAJOR_VERSION < 3 /* * If mod_python is also being loaded and thus it was * responsible for initialising Python it can leave in * place an active thread state. Under normal conditions * this would be eliminated in Apache child process by * the time that mod_wsgi got to do its own child * initialisation but in daemon process we skip the * mod_python child initialisation so the active thread * state still exists. Thus need to do a bit of a fiddle * to ensure there is no active thread state. Don't need * to worry about this with Python 3.X as mod_python * only supports Python 2.X. */ if (!wsgi_python_initialized) { PyGILState_STATE state; PyEval_AcquireLock(); state = PyGILState_Ensure(); PyGILState_Release(state); if (state == PyGILState_LOCKED) PyThreadState_Swap(NULL); PyEval_ReleaseLock(); } #endif /* * If the daemon is associated with a virtual host then * we can close all other error logs so long as they * aren't the same one as being used for the virtual * host. If the virtual host error log is different to * the main server error log, then also tie stderr to * that log file instead. This way any debugging sent * direct to stderr from C code also goes to the virtual * host error log. We close the error logs that aren't * required as that eliminates possibility that user * code executing in daemon process could maliciously * dump messages into error log for a different virtual * host, as well as stop them being reopened with mode * that would allow seeking back to start of file and * read any information in them. 
*/ if (daemon->group->server->is_virtual) { server_rec *server = NULL; apr_file_t *errfile = NULL; /* * Iterate over all servers and close any error * logs different to that for virtual host. Note that * if errors are being redirected to syslog, then * the server error log reference will actually be * a null pointer, so need to ensure that check for * that and don't attempt to close it in that case. */ server = wsgi_server; while (server != NULL) { if (server->error_log && server->error_log != daemon->group->server->error_log) { apr_file_close(server->error_log); } server = server->next; } /* * Reassociate stderr output with error log from the * virtual host the daemon is associated with. Close * the virtual host error log and point it at stderr * log instead. Do the latter so don't get two * references to same open file. Just in case * anything still accesses error log of main server, * map main server error log to that of the virtual * host. Note that cant do this if errors are being * redirected to syslog, as indicated by virtual * host error log being a null pointer. In that case * just leave everything as it was. Also can't remap * the error log for main server if it was being * redirected to syslog but virtual host wasn't. */ if (daemon->group->server->error_log && daemon->group->server->error_log != wsgi_server->error_log) { apr_file_t *oldfile = NULL; apr_file_open_stderr(&errfile, wsgi_server->process->pool); apr_file_dup2(errfile, daemon->group->server->error_log, wsgi_server->process->pool); oldfile = daemon->group->server->error_log; server = wsgi_server; while (server != NULL) { if (server->error_log == oldfile) server->error_log = errfile; server = server->next; } apr_file_close(oldfile); if (wsgi_server->error_log) wsgi_server->error_log = errfile; } } /* * Update reference to server object in case daemon * process is actually associated with a virtual host. * This way all logging actually goes into the virtual * hosts log file. */ if (daemon->group->server) { if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Process '%s' logging to " "'%s'.", getpid(), daemon->group->name, daemon->group->server->server_hostname); } wsgi_server = daemon->group->server; } else { if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Process '%s' forced to log " "to '%s'.", getpid(), daemon->group->name, wsgi_server->server_hostname); } } /* * Setup Python in the child daemon process. Note that * we ensure that we are now marked as the original * initialiser of the Python interpreter even though * mod_python might have done it, as we will be the one * to cleanup the child daemon process and not * mod_python. We also need to perform the special * Python setup which has to be done after a fork. */ wsgi_python_initialized = 1; wsgi_python_path = daemon->group->python_path; wsgi_python_eggs = daemon->group->python_eggs; wsgi_python_child_init(wsgi_daemon_pool); /* * Create socket wrapper for listener file descriptor * and mutex for controlling which thread gets to * perform the accept() when a connection is ready. */ apr_os_sock_put(&daemon->listener, &daemon->group->listener_fd, p); /* Run the main routine for the daemon process. */ wsgi_daemon_main(p, daemon); /* * Destroy the pool for the daemon process. This will * have the side affect of also destroying Python. 
*/ ap_log_error(APLOG_MARK, WSGI_LOG_INFO(0), wsgi_server, "mod_wsgi (pid=%d): Stopping process '%s'.", getpid(), daemon->group->name); apr_pool_destroy(wsgi_daemon_pool); /* Exit the daemon process when being shutdown. */ exit(-1); } apr_pool_note_subprocess(p, &daemon->process, APR_KILL_AFTER_TIMEOUT); apr_proc_other_child_register(&daemon->process, wsgi_manage_process, daemon, NULL, p); return OK; } static int wsgi_start_daemons(apr_pool_t *p) { WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; WSGIDaemonProcess *process = NULL; int mpm_generation = 0; int i, j; /* Do we need to create any daemon processes. */ if (!wsgi_daemon_list) return OK; /* What server generation is this. */ #if defined(AP_MPMQ_GENERATION) ap_mpm_query(AP_MPMQ_GENERATION, &mpm_generation); #else mpm_generation = ap_my_generation; #endif /* * Cache references to root server and pool as will need * to access these when restarting daemon process when * they die. */ wsgi_parent_pool = p; /* * Startup in turn the required number of daemon processes * for each of the named process groups. */ wsgi_daemon_index = apr_hash_make(p); entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { int status; entry = &entries[i]; /* * Check for whether the daemon process user and * group are the default Apache values. If they are * then reset them to the current values configured for * Apache. This is to work around where the User/Group * directives had not been set before the WSGIDaemonProcess * directive was used in configuration file. In this case, * where no 'user' and 'group' options were provided, * the default values would have been used, but these * were later overridden thus why we need to update it. */ if (entry->uid == ap_uname2id(DEFAULT_USER)) { entry->uid = ap_unixd_config.user_id; entry->user = ap_unixd_config.user_name; ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Reset default user for " "daemon process group '%s' to uid=%ld.", getpid(), entry->name, (long)entry->uid); } if (entry->gid == ap_gname2id(DEFAULT_GROUP)) { entry->gid = ap_unixd_config.group_id; ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Reset default group for " "daemon process group '%s' to gid=%ld.", getpid(), entry->name, (long)entry->gid); } /* * Calculate path for socket to accept requests on and * create the socket. */ entry->socket = apr_psprintf(p, "%s.%d.%d.%d.sock", wsgi_server_config->socket_prefix, getpid(), mpm_generation, entry->id); apr_hash_set(wsgi_daemon_index, entry->name, APR_HASH_KEY_STRING, entry); entry->listener_fd = wsgi_setup_socket(entry); if (entry->listener_fd == -1) return DECLINED; /* * Register cleanup so that listener socket is cleaned * up properly on a restart and on shutdown. */ apr_pool_cleanup_register(p, entry, wsgi_cleanup_process, apr_pool_cleanup_null); /* * If there is more than one daemon process in the group * then need to create an accept mutex for the daemon * processes to use so they don't interfere with each * other. 
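 *
 * For example, with the socket path created above being of the form
 * <socket-prefix>.<parent-pid>.<generation>.<id>.sock, the accept
 * mutex created below ends up alongside it at a path of the form
 * <socket-prefix>.<parent-pid>.<generation>.<id>.lock. The components
 * shown are only an illustration of the apr_psprintf() format used;
 * the actual prefix comes from the configured socket prefix
 * (WSGISocketPrefix).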
*/ if (entry->processes > 1) { entry->mutex_path = apr_psprintf(p, "%s.%d.%d.%d.lock", wsgi_server_config->socket_prefix, getpid(), mpm_generation, entry->id); status = apr_proc_mutex_create(&entry->mutex, entry->mutex_path, wsgi_server_config->lock_mechanism, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno), wsgi_server, "mod_wsgi (pid=%d): Couldn't create accept " "lock '%s' (%d).", getpid(), entry->mutex_path, wsgi_server_config->lock_mechanism); return DECLINED; } /* * Depending on the locking mechanism being used * need to change the permissions of the lock. Can't * use unixd_set_proc_mutex_perms() as it uses the * default Apache child process uid/gid where the * daemon process uid/gid can be different. */ if (!geteuid()) { #if APR_HAS_SYSVSEM_SERIALIZE if (!strcmp(apr_proc_mutex_name(entry->mutex), "sysvsem")) { apr_os_proc_mutex_t ospmutex; #if !APR_HAVE_UNION_SEMUN union semun { long val; struct semid_ds *buf; unsigned short *array; }; #endif union semun ick; struct semid_ds buf; apr_os_proc_mutex_get(&ospmutex, entry->mutex); buf.sem_perm.uid = entry->uid; buf.sem_perm.gid = entry->gid; buf.sem_perm.mode = 0600; ick.buf = &buf; if (semctl(ospmutex.crossproc, 0, IPC_SET, ick) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't set permissions on accept " "mutex '%s' (sysvsem).", getpid(), entry->mutex_path); return DECLINED; } } #endif #if APR_HAS_FLOCK_SERIALIZE if (!strcmp(apr_proc_mutex_name(entry->mutex), "flock")) { if (chown(entry->mutex_path, entry->uid, -1) < 0) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(errno), wsgi_server, "mod_wsgi (pid=%d): " "Couldn't set permissions on accept " "mutex '%s' (flock).", getpid(), entry->mutex_path); return DECLINED; } } #endif } } /* Create the actual required daemon processes. */ for (j = 1; j <= entry->processes; j++) { process = (WSGIDaemonProcess *)apr_pcalloc(p, sizeof( WSGIDaemonProcess)); process->group = entry; process->instance = j; status = wsgi_start_process(p, process); if (status != OK) return status; } } return OK; } static apr_status_t wsgi_close_socket(void *data) { WSGIDaemonSocket *daemon = NULL; daemon = (WSGIDaemonSocket *)data; return close(daemon->fd); } static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon) { struct sockaddr_un addr; int retries = 0; apr_interval_time_t timer = 0; memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; apr_cpystrn(addr.sun_path, daemon->socket, sizeof addr.sun_path); while (1) { retries++; if ((daemon->fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r, "mod_wsgi (pid=%d): Unable to create socket to " "connect to WSGI daemon process.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } if (connect(daemon->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { if (errno == ECONNREFUSED && retries < WSGI_CONNECT_ATTEMPTS) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r, "mod_wsgi (pid=%d): Connection attempt #%d to " "WSGI daemon process '%s' on '%s' failed, " "sleeping before retrying again.", getpid(), retries, daemon->name, daemon->socket); close(daemon->fd); /* * Progressively increase time we wait between * connection attempts. Start at 0.1 second and * double each time but apply ceiling at 2.0 * seconds. 
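 *
 * As an illustration only, the successive delays produced below are
 * roughly 0.1s, 0.2s, 0.4s, 0.8s and 1.6s for the first few retries,
 * with 2.0 seconds intended as the upper bound on the wait between
 * any two connection attempts.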
*/ if (!timer) timer = apr_time_make(0, 100000); apr_sleep(timer); timer = (2 * timer) % apr_time_make(2, 0); } else { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(errno), r, "mod_wsgi (pid=%d): Unable to connect to " "WSGI daemon process '%s' on '%s' after " "multiple attempts.", getpid(), daemon->name, daemon->socket); close(daemon->fd); return HTTP_SERVICE_UNAVAILABLE; } } else { apr_pool_cleanup_register(r->pool, daemon, wsgi_close_socket, apr_pool_cleanup_null); break; } } return OK; }
static apr_status_t wsgi_socket_send(int fd, const void *buf, size_t buf_size) { int rc; do { rc = write(fd, buf, buf_size); } while (rc < 0 && errno == EINTR); if (rc < 0) { return errno; } return APR_SUCCESS; }
static apr_status_t wsgi_send_strings(apr_pool_t *p, int fd, const char **s) { apr_status_t rv; apr_size_t total = 0; apr_size_t n; apr_size_t i; apr_size_t l; char *buffer; char *offset; total += sizeof(n); for (n = 0; s[n]; n++) total += (strlen(s[n]) + 1); buffer = apr_palloc(p, total + sizeof(total)); offset = buffer; memcpy(offset, &total, sizeof(total)); offset += sizeof(total); memcpy(offset, &n, sizeof(n)); offset += sizeof(n); for (i = 0; i < n; i++) { l = (strlen(s[i]) + 1); memcpy(offset, s[i], l); offset += l; } total += sizeof(total); if ((rv = wsgi_socket_send(fd, buffer, total)) != APR_SUCCESS) return rv; return APR_SUCCESS; }
static apr_status_t wsgi_send_request(request_rec *r, WSGIRequestConfig *config, WSGIDaemonSocket *daemon) { int rv; char **vars; const apr_array_header_t *env_arr; const apr_table_entry_t *elts; int i, j; /* Send subprocess environment from request object. */ env_arr = apr_table_elts(r->subprocess_env); elts = (const apr_table_entry_t *)env_arr->elts; vars = (char **)apr_palloc(r->pool, ((2*env_arr->nelts)+1)*sizeof(char *)); for (i=0, j=0; i<env_arr->nelts; ++i) { if (!elts[i].key) continue; vars[j++] = elts[i].key; vars[j++] = elts[i].val ? elts[i].val : ""; } vars[j] = NULL; rv = wsgi_send_strings(r->pool, daemon->fd, (const char **)vars); if (rv != APR_SUCCESS) return rv; return APR_SUCCESS; }
static void wsgi_discard_output(apr_bucket_brigade *bb) { apr_bucket *e; const char *buf; apr_size_t len; apr_status_t rv; for (e = APR_BRIGADE_FIRST(bb); e != APR_BRIGADE_SENTINEL(bb); e = APR_BUCKET_NEXT(e)) { if (APR_BUCKET_IS_EOS(e)) { break; } rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { break; } } }
static int wsgi_execute_remote(request_rec *r) { WSGIRequestConfig *config = NULL; WSGIDaemonSocket *daemon = NULL; WSGIProcessGroup *group = NULL; char *key = NULL; const char *hash = NULL; int status; apr_status_t rv; apr_interval_time_t timeout; int seen_eos; int child_stopped_reading; apr_file_t *tmpsock; apr_bucket_brigade *bbout; apr_bucket_brigade *bbin; apr_bucket *b; const char *location = NULL; /* Grab request configuration. */ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* * Only allow the process group to match against a restricted * set of processes if such a restricted set has been defined. */ if (config->restrict_process) { if (!apr_table_get(config->restrict_process, config->process_group)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon " "process called '%s' cannot be " "accessed by this WSGI application", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } } /* * Do not process request as remote if actually targeted at * the main Apache processes. */ if (!*config->process_group) return DECLINED; /* Grab details of matching process group.
*/ if (!wsgi_daemon_index) { wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon " "process called '%s' has been configured", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } group = (WSGIProcessGroup *)apr_hash_get(wsgi_daemon_index, config->process_group, APR_HASH_KEY_STRING); if (!group) { wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon " "process called '%s' has been configured", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } /* * Only allow the process group to match against a daemon * process defined within a virtual host with the same * server name or a daemon process defined at global server * scope. */ if (group->server != r->server && group->server != wsgi_server) { if (strcmp(group->server->server_hostname, r->server->server_hostname) != 0) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon " "process called '%s' cannot be " "accessed by this WSGI application", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } } /* * Check restrictions related to the group of the WSGI * script file and who has write access to the directory it * is contained in. If not satisfied forbid access. */ if (group->script_group) { apr_uid_t gid; struct group *grent = NULL; const char *grname = NULL; apr_finfo_t finfo; const char *path = NULL; if (!(r->finfo.valid & APR_FINFO_GROUP)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group " "information not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } gid = r->finfo.group; if ((grent = getgrgid(gid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine group of WSGI script file, " "gid=%ld", (long)gid), r->filename); return HTTP_FORBIDDEN; } grname = grent->gr_name; if (strcmp(group->script_group, grname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group of WSGI " "script file does not match required group " "for daemon process, group=%s", grname), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_WPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "World " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to world"), r->filename); return HTTP_FORBIDDEN; } path = ap_make_dirstr_parent(r->pool, r->filename); if (apr_stat(&finfo, path, APR_FINFO_NORM, r->pool) != APR_SUCCESS) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Unable to stat " "parent directory of WSGI script"), path); return HTTP_FORBIDDEN; } gid = finfo.group; if ((grent = getgrgid(gid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine group of parent directory of " "WSGI script file, gid=%ld", (long)gid), r->filename); return HTTP_FORBIDDEN; } grname = grent->gr_name; if (strcmp(group->script_group, grname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group of parent " "directory of WSGI script file does not " "match required group for daemon process, " "group=%s", grname), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to world"), r->filename); return HTTP_FORBIDDEN; } } /* * Check restrictions related to who can be the owner of * the WSGI script file and who has write access to the * directory it is contained in. If not satisfied forbid * access. 
*/ if (group->script_user) { apr_uid_t uid; struct passwd *pwent = NULL; const char *pwname = NULL; apr_finfo_t finfo; const char *path = NULL; if (!(r->finfo.valid & APR_FINFO_USER)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "User " "information not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } uid = r->finfo.user; if ((pwent = getpwuid(uid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine owner of WSGI script file, " "uid=%ld", (long)uid), r->filename); return HTTP_FORBIDDEN; } pwname = pwent->pw_name; if (strcmp(group->script_user, pwname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Owner of WSGI " "script file does not match required user " "for daemon process, user=%s", pwname), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_GPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_GWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to group"), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_WPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "World " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to world"), r->filename); return HTTP_FORBIDDEN; } path = ap_make_dirstr_parent(r->pool, r->filename); if (apr_stat(&finfo, path, APR_FINFO_NORM, r->pool) != APR_SUCCESS) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Unable to stat " "parent directory of WSGI script"), path); return HTTP_FORBIDDEN; } uid = finfo.user; if ((pwent = getpwuid(uid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine owner of parent directory of " "WSGI script file, uid=%ld", (long)uid), r->filename); return HTTP_FORBIDDEN; } pwname = pwent->pw_name; if (strcmp(group->script_user, pwname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Owner of parent " "directory of WSGI script file does not " "match required user for daemon process, " "user=%s", pwname), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to world"), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_GWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to group"), r->filename); return HTTP_FORBIDDEN; } } /* * Add magic marker into request environment so that daemon * process can verify that request is from a sender that can * be trusted. Wipe out original key to make it a bit harder * for rogue code in Apache child processes to trawl through * memory looking for unhashed string. */ key = apr_psprintf(r->pool, "%ld|%s|%s|%s", group->random, group->socket, r->filename, config->handler_script); hash = ap_md5(r->pool, (const unsigned char *)key); memset(key, '\0', strlen(key)); apr_table_setn(r->subprocess_env, "mod_wsgi.magic", hash); /* Create connection to the daemon process. 
*/ daemon = (WSGIDaemonSocket *)apr_pcalloc(r->pool, sizeof(WSGIDaemonSocket)); daemon->name = config->process_group; daemon->socket = group->socket; if ((status = wsgi_connect_daemon(r, daemon)) != OK) return status; /* Send request details and subprocess environment. */ if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Request server was " "'%s|%d'.", getpid(), r->server->server_hostname, r->server->port); } if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r, "mod_wsgi (pid=%d): Unable to send request details " "to WSGI daemon process '%s' on '%s'.", getpid(), daemon->name, daemon->socket); return HTTP_INTERNAL_SERVER_ERROR; } /* * Wrap the socket in an APR file object so that socket can * be more easily written to and so that pipe bucket can be * created later for reading from it. Note the file object is * initialised such that it will close socket when no longer * required so can kill off registration done at higher * level to close socket. */ apr_os_pipe_put_ex(&tmpsock, &daemon->fd, 1, r->pool); apr_pool_cleanup_kill(r->pool, daemon, wsgi_close_socket); apr_file_pipe_timeout_get(tmpsock, &timeout); apr_file_pipe_timeout_set(tmpsock, r->server->timeout); /* Setup bucket brigade for reading response from daemon. */ bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_pipe_create(tmpsock, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); b = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); /* * If process reload mechanism enabled, then we need to look * for marker indicating it is okay to transfer content, or * whether process is being restarted and that we should * therefore create a connection to daemon process again. */ if (*config->process_group) { int retries = 0; int maximum = (2*group->processes)+1; /* * While special header indicates a restart is being * done, then keep trying to reconnect. Cap the number * of retries to at most about 2 times the number of * daemon processes in the process group. If still being * told things are being restarted, then we will error * indicating service is unavailable. */ while (retries < maximum) { /* Scan the CGI script like headers from daemon. */ if ((status = ap_scan_script_header_err_brigade(r, bbin, NULL))) return HTTP_INTERNAL_SERVER_ERROR; /* * Status must be 200 for our special headers. Ideally * we would use 0 as did in the past but Apache 2.4 * complains if use 0 as not a valid status value. */ if (r->status != 200) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Unexpected status from " "WSGI daemon process '%d'.", getpid(), r->status); return HTTP_INTERNAL_SERVER_ERROR; } if (!strcmp(r->status_line, "200 Continue")) break; if (strcmp(r->status_line, "200 Rejected")) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Unexpected status from " "WSGI daemon process '%d'.", getpid(), r->status); return HTTP_INTERNAL_SERVER_ERROR; } /* Need to close previous socket connection first. */ apr_file_close(tmpsock); /* Has maximum number of attempts been reached. 
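 *
 * The cap computed above is (2 * processes) + 1, so as a worked
 * example a daemon process group configured with three processes
 * allows at most seven reconnection attempts before giving up and
 * returning a 503 Service Unavailable response.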
*/ if (retries == maximum) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r, "mod_wsgi (pid=%d): Maximum number of WSGI " "daemon process restart connects reached '%d'.", getpid(), maximum); return HTTP_SERVICE_UNAVAILABLE; } retries++; ap_log_rerror(APLOG_MARK, WSGI_LOG_INFO(0), r, "mod_wsgi (pid=%d): Connect after WSGI daemon " "process restart, attempt #%d.", getpid(), retries); /* Connect and setup connection just like before. */ if ((status = wsgi_connect_daemon(r, daemon)) != OK) return status; if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r, "mod_wsgi (pid=%d): Unable to send request " "details to WSGI daemon process '%s' on '%s'.", getpid(), daemon->name, daemon->socket); return HTTP_INTERNAL_SERVER_ERROR; } apr_os_pipe_put_ex(&tmpsock, &daemon->fd, 1, r->pool); apr_pool_cleanup_kill(r->pool, daemon, wsgi_close_socket); apr_file_pipe_timeout_get(tmpsock, &timeout); apr_file_pipe_timeout_set(tmpsock, r->server->timeout); apr_brigade_destroy(bbin); bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_pipe_create(tmpsock, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); b = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); } } /* * Need to reset request status value to HTTP_OK else it * screws up HTTP input filter when processing a POST * request with 100-continue requirement. */ r->status = HTTP_OK; /* Transfer any request content which was provided. */ seen_eos = 0; child_stopped_reading = 0; bbout = apr_brigade_create(r->pool, r->connection->bucket_alloc); do { apr_bucket *bucket; rv = ap_get_brigade(r->input_filters, bbout, AP_MODE_READBYTES, APR_BLOCK_READ, HUGE_STRING_LEN); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(rv), r, "mod_wsgi (pid=%d): Unable to get bucket brigade " "for request.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } for (bucket = APR_BRIGADE_FIRST(bbout); bucket != APR_BRIGADE_SENTINEL(bbout); bucket = APR_BUCKET_NEXT(bucket)) { const char *data; apr_size_t len; if (APR_BUCKET_IS_EOS(bucket)) { seen_eos = 1; break; } /* We can't do much with this. */ if (APR_BUCKET_IS_FLUSH(bucket)) { continue; } /* If the child stopped, we still must read to EOS. */ if (child_stopped_reading) { continue; } /* Read block. */ apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ); /* * Keep writing data to the child until done or too * much time elapses with no progress or an error * occurs. */ rv = apr_file_write_full(tmpsock, data, len, NULL); if (rv != APR_SUCCESS) { /* Daemon stopped reading, discard remainder. */ child_stopped_reading = 1; } } apr_brigade_cleanup(bbout); } while (!seen_eos); apr_file_pipe_timeout_set(tmpsock, timeout); /* * Close socket for writing so that daemon detects end of * request content. */ shutdown(daemon->fd, 1); /* Scan the CGI script like headers from daemon. */ if ((status = ap_scan_script_header_err_brigade(r, bbin, NULL))) return HTTP_INTERNAL_SERVER_ERROR; /* * Look for the special case of status being 200 but the * status line indicating an error and translate it into a * 500 error so that error document processing will occur * for those cases where WSGI application wouldn't have * supplied their own error document. We used to use 0 * here for status but Apache 2.4 prohibits it now. */ if (r->status == 200 && !strcmp(r->status_line, "200 Error")) return HTTP_INTERNAL_SERVER_ERROR; /* * Look for 'Location' header and if an internal * redirect, execute the redirect. 
This behaviour is * consistent with how mod_cgi and mod_cgid work and * what is permitted by the CGI specification. */ location = apr_table_get(r->headers_out, "Location"); if (location && location[0] == '/' && r->status == 200) { /* * Discard all response content returned from * the daemon process. */ wsgi_discard_output(bbin); apr_brigade_destroy(bbin); /* * The internal redirect needs to be a GET no * matter what the original method was. */ r->method = apr_pstrdup(r->pool, "GET"); r->method_number = M_GET; /* * We already read the message body (if any), so * don't allow the redirected request to think * it has one. Not sure if we need to worry * about removing 'Transfer-Encoding' header. */ apr_table_unset(r->headers_in, "Content-Length"); ap_internal_redirect_handler(location, r); return OK; } /* * If multiple WWW-Authenticate headers they would * have been merged and although this is allowed by * HTTP specifications, some HTTP clients don't * like that, so split them into separate headers * again. */ if (apr_table_get(r->err_headers_out, "WWW-Authenticate")) { const char* value = NULL; const char* item = NULL; value = apr_table_get(r->err_headers_out, "WWW-Authenticate"); apr_table_unset(r->err_headers_out, "WWW-Authenticate"); while (*value) { item = ap_getword(r->pool, &value, ','); while (*item && apr_isspace(*item)) item++; if (*item) apr_table_add(r->err_headers_out, "WWW-Authenticate", item); } } /* * Allow the web server to override any error * page produced by the WSGI application. */ if (config->error_override && ap_is_HTTP_ERROR(r->status)) { status = r->status; r->status = HTTP_OK; r->status_line = NULL; /* * Discard all response content returned from * the daemon process if any expected. */ if (!r->header_only && /* not HEAD request */ (status != HTTP_NO_CONTENT) && /* not 204 */ (status != HTTP_NOT_MODIFIED)) { /* not 304 */ wsgi_discard_output(bbin); apr_brigade_destroy(bbin); } return status; } /* Transfer any response content. */ ap_pass_brigade(r->output_filters, bbin); return OK; } static apr_status_t wsgi_socket_read(apr_socket_t *sock, void *vbuf, apr_size_t size) { char *buf = vbuf; apr_status_t rv; apr_size_t count = 0; apr_size_t len = 0; do { len = size - count; if ((rv = apr_socket_recv(sock, buf + count, &len)) != APR_SUCCESS) return rv; count += len; } while (count < size); return APR_SUCCESS; } static apr_status_t wsgi_read_strings(apr_socket_t *sock, char ***s, apr_pool_t *p) { apr_status_t rv; apr_size_t total; apr_size_t n; apr_size_t i; apr_size_t l; char *buffer; char *offset; if ((rv = wsgi_socket_read(sock, &total, sizeof(total))) != APR_SUCCESS) return rv; buffer = apr_palloc(p, total); offset = buffer; if ((rv = wsgi_socket_read(sock, buffer, total)) != APR_SUCCESS) return rv; memcpy(&n, offset, sizeof(n)); offset += sizeof(n); *s = apr_pcalloc(p, (n+1)*sizeof(**s)); for (i = 0; i < n; i++) { l = strlen(offset) + 1; (*s)[i] = offset; offset += l; } return APR_SUCCESS; } static apr_status_t wsgi_read_request(apr_socket_t *sock, request_rec *r) { int rv; char **vars; /* Read subprocess environment from request object. 
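 *
 * The environment arrives in the simple length prefixed format
 * produced by wsgi_send_strings() in the Apache child process: a
 * total byte count (apr_size_t), then the number of strings
 * (apr_size_t), then that many NUL terminated strings laid out back
 * to back. The strings alternate key, value, key, value, so the
 * loop below consumes them in pairs to populate r->subprocess_env.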
*/ rv = wsgi_read_strings(sock, &vars, r->pool); if (rv != APR_SUCCESS) return rv; while (*vars) { char *key = *vars++; apr_table_setn(r->subprocess_env, key, *vars++); } return APR_SUCCESS; } static ap_filter_rec_t *wsgi_header_filter_handle; static apr_status_t wsgi_header_filter(ap_filter_t *f, apr_bucket_brigade *b) { request_rec *r = f->r; struct iovec vec1[4]; apr_bucket_brigade *b2; char crlf[] = CRLF; apr_size_t buflen; const apr_array_header_t *elts; const apr_table_entry_t *t_elt; const apr_table_entry_t *t_end; struct iovec *vec2; struct iovec *vec2_next; /* Output status line. */ vec1[0].iov_base = (void *)"Status:"; vec1[0].iov_len = strlen("Status:"); vec1[1].iov_base = (void *)" "; vec1[1].iov_len = sizeof(" ") - 1; vec1[2].iov_base = (void *)(r->status_line); vec1[2].iov_len = strlen(r->status_line); vec1[3].iov_base = (void *)CRLF; vec1[3].iov_len = sizeof(CRLF) - 1; b2 = apr_brigade_create(r->pool, r->connection->bucket_alloc); apr_brigade_writev(b2, NULL, NULL, vec1, 4); /* Merge response header tables together. */ if (!apr_is_empty_table(r->err_headers_out)) { r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, r->headers_out); } /* Override the content type for response. */ if (r->content_type) apr_table_setn(r->headers_out, "Content-Type", r->content_type); /* Formt the response headers for output. */ elts = apr_table_elts(r->headers_out); if (elts->nelts != 0) { t_elt = (const apr_table_entry_t *)(elts->elts); t_end = t_elt + elts->nelts; vec2 = (struct iovec *)apr_palloc(r->pool, 4 * elts->nelts * sizeof(struct iovec)); vec2_next = vec2; do { vec2_next->iov_base = (void*)(t_elt->key); vec2_next->iov_len = strlen(t_elt->key); vec2_next++; vec2_next->iov_base = ": "; vec2_next->iov_len = sizeof(": ") - 1; vec2_next++; vec2_next->iov_base = (void*)(t_elt->val); vec2_next->iov_len = strlen(t_elt->val); vec2_next++; vec2_next->iov_base = CRLF; vec2_next->iov_len = sizeof(CRLF) - 1; vec2_next++; t_elt++; } while (t_elt < t_end); apr_brigade_writev(b2, NULL, NULL, vec2, vec2_next - vec2); } /* Format terminating blank line for response headers. */ buflen = strlen(crlf); apr_brigade_write(b2, NULL, NULL, crlf, buflen); /* Output the response headers. */ ap_pass_brigade(f->next, b2); /* Remove ourselves from filter chain so we aren't called again. */ ap_remove_output_filter(f); /* Output the partial response content. */ return ap_pass_brigade(f->next, b); } static int wsgi_hook_daemon_handler(conn_rec *c) { apr_socket_t *csd; request_rec *r; apr_pool_t *p; apr_status_t rv; char *key; apr_sockaddr_t *addr; const char *filename; const char *script; const char *magic; const char *hash; WSGIRequestConfig *config; apr_bucket *e; apr_bucket_brigade *bb; core_request_config *req_cfg; ap_filter_t *current = NULL; ap_filter_t *next = NULL; const apr_array_header_t *head = NULL; const apr_table_entry_t *elts = NULL; int i = 0; const char *item; /* Don't do anything if not in daemon process. */ if (!wsgi_daemon_pool) return DECLINED; /* * Remove all input/output filters except the core filters. * This will ensure that any SSL filters we don't want are * removed. This is a bit of a hack. Only other option is to * duplicate the code for core input/output filters so can * avoid full Apache connection processing, which is what is * installed the SSL filters and possibly other filters for * logging etc. 
*/ current = c->input_filters; next = current->next; while (current) { if (current->frec == ap_core_input_filter_handle) { current = next; if (!current) break; next = current->next; continue; } ap_remove_input_filter(current); current = next; if (current) next = current->next; } current = c->output_filters; next = current->next; while (current) { if (current->frec == ap_core_output_filter_handle) { current = next; if (!current) break; next = current->next; continue; } ap_remove_output_filter(current); current = next; if (current) next = current->next; } /* Create and populate our own request object. */ apr_pool_create(&p, c->pool); r = apr_pcalloc(p, sizeof(request_rec)); r->pool = p; r->connection = c; r->server = c->base_server; r->user = NULL; r->ap_auth_type = NULL; r->allowed_methods = ap_make_method_list(p, 2); r->headers_in = apr_table_make(r->pool, 25); r->subprocess_env = apr_table_make(r->pool, 25); r->headers_out = apr_table_make(r->pool, 12); r->err_headers_out = apr_table_make(r->pool, 5); r->notes = apr_table_make(r->pool, 5); r->request_config = ap_create_request_config(r->pool); r->proto_output_filters = c->output_filters; r->output_filters = r->proto_output_filters; r->proto_input_filters = c->input_filters; r->input_filters = r->proto_input_filters; r->per_dir_config = r->server->lookup_defaults; r->sent_bodyct = 0; r->read_length = 0; r->read_body = REQUEST_NO_BODY; r->status = HTTP_OK; r->the_request = NULL; r->used_path_info = AP_REQ_DEFAULT_PATH_INFO; /* * Install our own output filter for writing back headers in * CGI script style. */ ap_add_output_filter_handle(wsgi_header_filter_handle, NULL, r, r->connection); /* Create and install the WSGI request config. */ config = (WSGIRequestConfig *)apr_pcalloc(r->pool, sizeof(WSGIRequestConfig)); ap_set_module_config(r->request_config, &wsgi_module, (void *)config); /* Grab the socket from the connection core config. */ csd = ap_get_module_config(c->conn_config, &core_module); /* * Fake up parts of the internal per request core * configuration. If we don't do this then when Apache is * compiled with the symbol AP_DEBUG, internal checks made * by Apache will result in process crashing. */ req_cfg = apr_pcalloc(r->pool, sizeof(core_request_config)); req_cfg->bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); ap_set_module_config(r->request_config, &core_module, req_cfg); /* Read in the request details and setup request object. */ if ((rv = wsgi_read_request(csd, r)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): Unable to read WSGI request.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } /* Check magic marker used to validate origin of request. 
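 *
 * The Apache child process set mod_wsgi.magic to the MD5 hex digest
 * of "<random>|<socket>|<filename>|<handler-script>". The same key is
 * rebuilt below from this daemon process group's own random value and
 * socket path, and the request is rejected if the digests differ.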
*/ filename = apr_table_get(r->subprocess_env, "SCRIPT_FILENAME"); script = apr_table_get(r->subprocess_env, "mod_wsgi.handler_script"); magic = apr_table_get(r->subprocess_env, "mod_wsgi.magic"); if (!magic) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Request origin could not be " "validated.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } key = apr_psprintf(r->pool, "%ld|%s|%s|%s", wsgi_daemon_process->group->random, wsgi_daemon_process->group->socket, filename, script); hash = ap_md5(r->pool, (const unsigned char *)key); memset(key, '\0', strlen(key)); if (strcmp(magic, hash) != 0) { ap_log_error(APLOG_MARK, WSGI_LOG_ALERT(rv), wsgi_server, "mod_wsgi (pid=%d): Request origin could not be " "validated.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } apr_table_unset(r->subprocess_env, "mod_wsgi.magic"); /* * If we are executing in a chroot environment, we need to * adjust SCRIPT_FILENAME to remove leading portion of path * that corresponds to the location of the chroot directory. * Also need to adjust DOCUMENT_ROOT as well, although in * that case if it doesn't actually fall within the choot * directory, we just delete it outright as would be incorrect * if that directory lay outside of the chroot directory. */ if (wsgi_daemon_process->group->root) { const char *root; const char *path; root = wsgi_daemon_process->group->root; path = filename; if (strstr(path, root) == path && path[strlen(root)] == '/') { path += strlen(root); apr_table_set(r->subprocess_env, "SCRIPT_FILENAME", path); filename = path; } else { ap_log_error(APLOG_MARK, WSGI_LOG_CRIT(rv), wsgi_server, "mod_wsgi (pid=%d): WSGI script '%s' not located " "within chroot directory '%s'.", getpid(), path, root); return HTTP_INTERNAL_SERVER_ERROR; } path = (char *)apr_table_get(r->subprocess_env, "DOCUMENT_ROOT"); if (strstr(path, root) == path) { path += strlen(root); apr_table_set(r->subprocess_env, "DOCUMENT_ROOT", path); } else { apr_table_unset(r->subprocess_env, "DOCUMENT_ROOT"); } } r->filename = (char *)filename; /* Recalculate WSGI script file modification time. */ if ((rv = apr_stat(&r->finfo, filename, APR_FINFO_NORM, r->pool)) != APR_SUCCESS) { /* * Don't fail at this point. Allow the lack of file to * be detected later when trying to load the script file. */ ap_log_error(APLOG_MARK, WSGI_LOG_WARNING(rv), wsgi_server, "mod_wsgi (pid=%d): Unable to stat target WSGI script " "'%s'.", getpid(), filename); r->finfo.mtime = 0; } /* * Trigger mapping of host information to server configuration * so that when logging errors they go to the correct error log * file for the host. 
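 *
 * The lookup below uses a key of the form "<host>|<port>" built from
 * mod_wsgi.listener_host and mod_wsgi.listener_port, for example
 * "203.0.113.10|80", with an empty host part typically meaning Apache
 * was bound to all interfaces. The address the key resolves to was
 * recorded in wsgi_daemon_listeners when this daemon process started.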
*/ #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) r->connection->client_ip = (char *)apr_table_get(r->subprocess_env, "REMOTE_ADDR"); #else r->connection->remote_ip = (char *)apr_table_get(r->subprocess_env, "REMOTE_ADDR"); #endif #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) r->useragent_addr = c->client_addr; r->useragent_ip = c->client_ip; #endif key = apr_psprintf(p, "%s|%s", apr_table_get(r->subprocess_env, "mod_wsgi.listener_host"), apr_table_get(r->subprocess_env, "mod_wsgi.listener_port")); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Server listener address '%s'.", getpid(), key); } addr = (apr_sockaddr_t *)apr_hash_get(wsgi_daemon_listeners, key, APR_HASH_KEY_STRING); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Server listener address '%s' was" "%s found.", getpid(), key, addr ? "" : " not"); } if (addr) { c->local_addr = addr; } ap_update_vhost_given_ip(r->connection); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Connection server matched was " "'%s|%d'.", getpid(), c->base_server->server_hostname, c->base_server->port); } r->server = c->base_server; if (apr_table_get(r->subprocess_env, "HTTP_HOST")) { apr_table_setn(r->headers_in, "Host", apr_table_get(r->subprocess_env, "HTTP_HOST")); } ap_update_vhost_from_headers(r); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, WSGI_LOG_DEBUG(0), wsgi_server, "mod_wsgi (pid=%d): Request server matched was '%s|%d'.", getpid(), r->server->server_hostname, r->server->port); } /* * Set content length of any request content and add the * standard HTTP input filter so that standard input routines * for request content will work. */ item = apr_table_get(r->subprocess_env, "CONTENT_LENGTH"); if (item) apr_table_setn(r->headers_in, "Content-Length", item); /* Install the standard HTTP input filter. */ ap_add_input_filter("HTTP_IN", NULL, r, r->connection); /* Set details of WSGI specific request config. */ config->process_group = apr_table_get(r->subprocess_env, "mod_wsgi.process_group"); config->application_group = apr_table_get(r->subprocess_env, "mod_wsgi.application_group"); config->callable_object = apr_table_get(r->subprocess_env, "mod_wsgi.callable_object"); config->handler_script = apr_table_get(r->subprocess_env, "mod_wsgi.handler_script"); config->script_reloading = atoi(apr_table_get(r->subprocess_env, "mod_wsgi.script_reloading")); item = apr_table_get(r->subprocess_env, "mod_wsgi.enable_sendfile"); if (item && !strcasecmp(item, "1")) config->enable_sendfile = 1; else config->enable_sendfile = 0; /* * Define how input data is to be processed. This * was already done in the Apache child process and * so it shouldn't fail. More importantly, it sets * up request data tracking how much input has been * read or if more remains. */ ap_setup_client_block(r, REQUEST_CHUNKED_ERROR); /* * Where original request used chunked transfer * encoding, we have to do a further fiddle here and * make Apache think that request content length is * maximum length possible. This is to satisfy the * HTTP_IN input filter. Also flag request as being * chunked so WSGI input function doesn't think that * there may actually be that amount of data * remaining. 
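 *
 * For example, on a platform where apr_off_t has the same size as
 * long, the Content-Length header is set below to LONG_MAX
 * (9223372036854775807 on common 64 bit systems), and r->read_chunked
 * is set so the WSGI input handling does not treat that figure as the
 * amount of request content actually remaining.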
*/ item = apr_table_get(r->subprocess_env, "mod_wsgi.input_chunked"); if (item && !strcasecmp(item, "1")) { if (sizeof(apr_off_t) == sizeof(long)) { apr_table_setn(r->headers_in, "Content-Length", apr_psprintf(r->pool, "%ld", LONG_MAX)); } else { apr_table_setn(r->headers_in, "Content-Length", apr_psprintf(r->pool, "%d", INT_MAX)); } r->read_chunked = 1; } /* * Execute the actual target WSGI application. In * normal cases OK should always be returned. If * however an error occurs in importing or executing * the script or the Python code raises an exception * which is not caught and handled, then an internal * server error can be returned. As we don't want to * be triggering any error document handlers in the * daemon process we use a fake status line with 0 * as the status value. This will be picked up in * the Apache child process which will translate it * back to a 500 error so that normal error document * processing occurs. */ r->status = HTTP_OK; if (wsgi_execute_script(r) != OK) { r->status = HTTP_INTERNAL_SERVER_ERROR; r->status_line = "200 Error"; } /* * Ensure that request is finalised and any response * is flushed out. This will as a side effect read * any input data which wasn't consumed, thus * ensuring that the Apache child process isn't hung * waiting to send the request content and can * therefore process the response correctly. */ ap_finalize_request_protocol(r); bb = apr_brigade_create(r->pool, c->bucket_alloc); e = apr_bucket_flush_create(c->bucket_alloc); APR_BRIGADE_INSERT_HEAD(bb, e); ap_pass_brigade(r->connection->output_filters, bb); apr_pool_destroy(p); return OK; } #endif /* * Apache 2.X module initialisation functions. */ static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp, apr_pool_t *plog, server_rec *s) { void *data = NULL; const char *userdata_key = "wsgi_init"; char package[128]; int status = OK; /* * Init function gets called twice during startup, we only * need to actually do anything on the second time it is * called. This avoids unecessarily initialising and then * destroying Python for no reason. */ apr_pool_userdata_get(&data, userdata_key, s->process->pool); if (!data) { apr_pool_userdata_set((const void *)1, userdata_key, apr_pool_cleanup_null, s->process->pool); return OK; } /* Setup module version information. */ sprintf(package, "mod_wsgi/%s", MOD_WSGI_VERSION_STRING); ap_add_version_component(pconf, package); /* Record Python version string with Apache. */ if (!Py_IsInitialized()) { char buffer[256]; const char *token = NULL; const char *version = NULL; version = Py_GetVersion(); token = version; while (*token && *token != ' ') token++; strcpy(buffer, "Python/"); strncat(buffer, version, token - version); ap_add_version_component(pconf, buffer); } /* Retain reference to base server. */ wsgi_server = s; /* Retain record of parent process ID. */ wsgi_parent_pid = getpid(); /* Determine whether multiprocess and/or multithread. */ ap_mpm_query(AP_MPMQ_IS_THREADED, &wsgi_multithread); wsgi_multithread = (wsgi_multithread != AP_MPMQ_NOT_SUPPORTED); ap_mpm_query(AP_MPMQ_IS_FORKED, &wsgi_multiprocess); if (wsgi_multiprocess != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &wsgi_multiprocess); wsgi_multiprocess = (wsgi_multiprocess != 1); } /* Retain reference to main server config. */ wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module); /* * Check that the version of Python found at * runtime is what was used at compilation. 
*/ wsgi_python_version(); /* * Initialise Python if required to be done in * the parent process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (wsgi_python_required == -1) wsgi_python_required = 1; if (!wsgi_python_after_fork) wsgi_python_init(pconf); /* Startup separate named daemon processes. */ #if defined(MOD_WSGI_WITH_DAEMONS) status = wsgi_start_daemons(pconf); #endif return status; } static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s) { #if defined(MOD_WSGI_WITH_DAEMONS) WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; /* Close listener sockets for daemon processes. */ if (wsgi_daemon_list) { entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; close(entry->listener_fd); entry->listener_fd = -1; } } #endif if (wsgi_python_required) { /* * Initialise Python if required to be done in * the child process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (wsgi_python_after_fork) wsgi_python_init(p); /* * Now perform additional initialisation steps * always done in child process. */ wsgi_python_child_init(p); } } #if defined(MOD_WSGI_WITH_AAA_HANDLERS) #include "apr_lib.h" static char *wsgi_original_uri(request_rec *r) { char *first, *last; if (r->the_request == NULL) { return (char *) apr_pcalloc(r->pool, 1); } first = r->the_request; /* use the request-line */ while (*first && !apr_isspace(*first)) { ++first; /* skip over the method */ } while (apr_isspace(*first)) { ++first; /* and the space(s) */ } last = first; while (*last && !apr_isspace(*last)) { ++last; /* end at next whitespace */ } return apr_pstrmemdup(r->pool, first, last - first); } static char *wsgi_http2env(apr_pool_t *a, const char *w) { char *res = (char *)apr_palloc(a, sizeof("HTTP_") + strlen(w)); char *cp = res; char c; *cp++ = 'H'; *cp++ = 'T'; *cp++ = 'T'; *cp++ = 'P'; *cp++ = '_'; while ((c = *w++) != 0) { if (!apr_isalnum(c)) { *cp++ = '_'; } else { *cp++ = apr_toupper(c); } } *cp = 0; return res; } typedef struct { PyObject_HEAD request_rec *r; WSGIRequestConfig *config; PyObject *log; } AuthObject; static AuthObject *newAuthObject(request_rec *r, WSGIRequestConfig *config) { AuthObject *self; self = PyObject_New(AuthObject, &Auth_Type); if (self == NULL) return NULL; self->config = config; self->r = r; self->log = newLogObject(r, APLOG_ERR, NULL); return self; } static void Auth_dealloc(AuthObject *self) { Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Auth_environ(AuthObject *self, const char *group) { PyObject *vars; PyObject *object; request_rec *r = self->r; server_rec *s = r->server; conn_rec *c = r->connection; apr_port_t rport; const apr_array_header_t *hdrs_arr; const apr_table_entry_t *hdrs; const char *value = NULL; int i; vars = PyDict_New(); hdrs_arr = apr_table_elts(r->headers_in); hdrs = (const apr_table_entry_t *) hdrs_arr->elts; for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } if (!strcasecmp(hdrs[i].key, "Content-type")) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif PyDict_SetItemString(vars, "CONTENT_TYPE", object); Py_DECREF(object); } else if (!strcasecmp(hdrs[i].key, "Content-length")) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif 
PyDict_SetItemString(vars, "CONTENT_LENGTH", object); Py_DECREF(object); } else if (!strcasecmp(hdrs[i].key, "Authorization") || !strcasecmp(hdrs[i].key, "Proxy-Authorization")) { continue; } else { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif PyDict_SetItemString(vars, wsgi_http2env(r->pool, hdrs[i].key), object); Py_DECREF(object); } } value = ap_psignature("", r); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_SIGNATURE", object); Py_DECREF(object); #if AP_MODULE_MAGIC_AT_LEAST(20060905,0) value = ap_get_server_banner(); #else value = ap_get_server_version(); #endif #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_SOFTWARE", object); Py_DECREF(object); value = ap_escape_html(r->pool, ap_get_server_name(r)); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_NAME", object); Py_DECREF(object); if (r->connection->local_ip) { value = r->connection->local_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_ADDR", object); Py_DECREF(object); } value = apr_psprintf(r->pool, "%u", ap_get_server_port(r)); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_PORT", object); Py_DECREF(object); value = ap_get_remote_host(c, r->per_dir_config, REMOTE_HOST, NULL); if (value) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_HOST", object); Py_DECREF(object); } #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if (r->useragent_ip) { value = r->useragent_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_ADDR", object); Py_DECREF(object); } #else if (c->remote_ip) { value = c->remote_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_ADDR", object); Py_DECREF(object); } #endif #if PY_MAJOR_VERSION >= 3 value = ap_document_root(r); object = PyUnicode_Decode(value, strlen(value), Py_FileSystemDefaultEncoding, "surrogateescape"); #else object = PyString_FromString(ap_document_root(r)); #endif PyDict_SetItemString(vars, "DOCUMENT_ROOT", object); Py_DECREF(object); if (s->server_admin) { value = s->server_admin; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_ADMIN", object); Py_DECREF(object); } #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) rport = c->client_addr->port; value = apr_itoa(r->pool, rport); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_PORT", object); Py_DECREF(object); #else rport = 
c->remote_addr->port; value = apr_itoa(r->pool, rport); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_PORT", object); Py_DECREF(object); #endif value = r->protocol; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_PROTOCOL", object); Py_DECREF(object); value = r->method; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REQUEST_METHOD", object); Py_DECREF(object); value = r->args ? r->args : ""; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "QUERY_STRING", object); Py_DECREF(object); value = wsgi_original_uri(r); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REQUEST_URI", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString(""); #else object = PyString_FromString(""); #endif PyDict_SetItemString(vars, "mod_wsgi.process_group", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(group, strlen(group), NULL); #else object = PyString_FromString(group); #endif PyDict_SetItemString(vars, "mod_wsgi.application_group", object); Py_DECREF(object); object = PyLong_FromLong(self->config->script_reloading); PyDict_SetItemString(vars, "mod_wsgi.script_reloading", object); Py_DECREF(object); /* * Setup log object for WSGI errors. Don't decrement * reference to log object as keep reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* * If Apache extensions are enabled add a CObject reference * to the Apache request_rec structure instance. */ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2 object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use. 
*/ #if AP_SERVER_MAJORVERSION_NUMBER >= 2 object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); #endif return vars; } #if AP_SERVER_MAJORVERSION_NUMBER >= 2 static PyObject *Auth_ssl_is_https(AuthObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Auth_ssl_var_lookup(AuthObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); Py_DECREF(item); return NULL; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_DECREF(item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_XINCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); if (!value) { Py_XINCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } #endif static PyMethodDef Auth_methods[] = { #if AP_SERVER_MAJORVERSION_NUMBER >= 2 { "ssl_is_https", (PyCFunction)Auth_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Auth_ssl_var_lookup, METH_VARARGS, 0 }, #endif { NULL, NULL} }; static PyTypeObject Auth_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Auth", /*tp_name*/ sizeof(AuthObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Auth_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Auth_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; #if defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static authn_status wsgi_check_password(request_rec *r, const char *user, const char *password) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; 
const char *group; authn_status status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_user_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI user " "authentication script not provided.", getpid()); return AUTH_GENERAL_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return AUTH_GENERAL_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume an internal server error unless everything okay. */ status = AUTH_GENERAL_ERROR; /* Determine if script exists and execute it. */ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "check_password"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, user, password); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { status = AUTH_USER_NOT_FOUND; } else if (result == Py_True) { status = AUTH_GRANTED; } else if (result == Py_False) { status = AUTH_DENIED; } else { PyErr_SetString(PyExc_TypeError, "Basic auth " "provider must return True, False " "or None"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. 
*/ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } else Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Basic' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static authn_status wsgi_get_realm_hash(request_rec *r, const char *user, const char *realm, char **rethash) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; authn_status status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_user_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI user " "authentication script not provided.", getpid()); return AUTH_GENERAL_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return AUTH_GENERAL_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume an internal server error unless everything okay. */ status = AUTH_GENERAL_ERROR; /* Determine if script exists and execute it. 
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "get_realm_hash"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, user, realm); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { status = AUTH_USER_NOT_FOUND; } else if (PyString_Check(result)) { *rethash = PyString_AsString(result); *rethash = apr_pstrdup(r->pool, *rethash); status = AUTH_USER_FOUND; } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Digest auth " "provider must return None " "or string object, value " "containing non 'latin-1' " "characters found"); } else { Py_DECREF(result); result = latin_item; *rethash = PyString_AsString(result); *rethash = apr_pstrdup(r->pool, *rethash); status = AUTH_USER_FOUND; } } #endif else { PyErr_SetString(PyExc_TypeError, "Digest auth " "provider must return None " "or string object"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } else Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Digest' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static const authn_provider wsgi_authn_provider = { &wsgi_check_password, &wsgi_get_realm_hash }; #endif static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config, apr_table_t **grpstatus) { apr_table_t *grps = apr_table_make(r->pool, 15); InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; int status = HTTP_INTERNAL_SERVER_ERROR; if (!config->auth_group_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI group " "authentication script not provided.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
*/ script = config->auth_group_script->handler_script; group = wsgi_server_group(r, config->auth_group_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume an internal server error unless everything okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. 
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "groups_for_user"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *sequence = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Os)", vars, r->user); sequence = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (sequence) { PyObject *iterator; iterator = PyObject_GetIter(sequence); if (iterator) { PyObject *item; const char *name; status = OK; while ((item = PyIter_Next(iterator))) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): " "Groups for user returned " "from '%s' must be an " "iterable sequence of " "byte strings, value " "containing non 'latin-1' " "characters found", getpid(), script); Py_END_ALLOW_THREADS Py_DECREF(item); status = HTTP_INTERNAL_SERVER_ERROR; break; } else { Py_DECREF(item); item = latin_item; } } #endif if (!PyString_Check(item)) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Groups for " "user returned from '%s' must " "be an iterable sequence of " "byte strings.", getpid(), script); Py_END_ALLOW_THREADS Py_DECREF(item); status = HTTP_INTERNAL_SERVER_ERROR; break; } name = PyString_AsString(item); apr_table_setn(grps, apr_pstrdup(r->pool, name), "1"); Py_DECREF(item); } Py_DECREF(iterator); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Groups for user " "returned from '%s' must be an iterable " "sequence of byte strings.", getpid(), script); Py_END_ALLOW_THREADS } Py_DECREF(sequence); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } else Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI group " "authentication script '%s' does not provide " "group provider.", getpid(), script); Py_END_ALLOW_THREADS } /* Log any details of exceptions if execution failed. 
*/ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); if (status == OK) *grpstatus = grps; return status; } static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config, const char *host) { InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; int result = 0; if (!config->access_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI host " "access script not provided.", getpid()); return 0; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->access_script->handler_script; group = wsgi_server_group(r, config->access_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return 0; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume not allowed unless everything okay. */ result = 0; /* Determine if script exists and execute it. 
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "allow_access"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *flag = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oz)", vars, host); flag = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (flag) { if (flag == Py_None) { result = -1; } else if (PyBool_Check(flag)) { if (flag == Py_True) result = 1; } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Indicator of " "host accessibility returned from '%s' " "must a boolean or None.", getpid(), script); Py_END_ALLOW_THREADS } Py_DECREF(flag); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } else Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI host " "access script '%s' does not provide " "host validator.", getpid(), script); Py_END_ALLOW_THREADS } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return result; } static int wsgi_hook_access_checker(request_rec *r) { WSGIRequestConfig *config; int allow = 0; const char *host = NULL; config = wsgi_create_req_config(r->pool, r); if (!config->access_script) return DECLINED; host = ap_get_remote_host(r->connection, r->per_dir_config, REMOTE_HOST, NULL); #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if (!host) host = r->useragent_ip; #else if (!host) host = r->connection->remote_ip; #endif allow = wsgi_allow_access(r, config, host); if (allow < 0) return DECLINED; else if (allow) return OK; if (ap_satisfies(r) != SATISFY_ANY || !ap_some_auth_required(r)) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): " "Client denied by server configuration: '%s'.", getpid(), r->filename); } return HTTP_FORBIDDEN; } static int wsgi_hook_check_user_id(request_rec *r) { WSGIRequestConfig *config; int status = -1; const char *password; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; if ((status = ap_get_basic_auth_pw(r, &password))) return status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_user_script) return DECLINED; /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
*/ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, WSGI_LOG_CRIT(0), r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Assume an internal server error unless everything okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. */ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "check_password"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, r->user, password); result = PyEval_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { if (config->user_authoritative) { ap_note_basic_auth_failure(r); status = HTTP_UNAUTHORIZED; ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): User '%s' not " "found in executing authentication " "script '%s', for uri '%s'.", getpid(), r->user, script, r->uri); } else status = DECLINED; } else if (result == Py_True) { status = OK; } else if (result == Py_False) { ap_note_basic_auth_failure(r); status = HTTP_UNAUTHORIZED; ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Password mismatch " "for user '%s' in executing " "authentication script '%s', for uri " "'%s'.", getpid(), r->user, script, r->uri); } else { PyErr_SetString(PyExc_TypeError, "Basic auth " "provider must return True, False " "or None"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. 
*/ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyEval_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } else Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Basic' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script); } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } #if defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) #if MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED static authz_status wsgi_check_authorization(request_rec *r, const char *require_args, const void *parsed_require_line) #else static authz_status wsgi_check_authorization(request_rec *r, const char *require_args) #endif { WSGIRequestConfig *config; apr_table_t *grpstatus = NULL; const char *t, *w; int status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_group_script) { ap_log_error(APLOG_MARK, WSGI_LOG_ERR(0), wsgi_server, "mod_wsgi (pid=%d): Location of WSGI group " "authorization script not provided.", getpid()); return AUTHZ_DENIED; } status = wsgi_groups_for_user(r, config, &grpstatus); if (status != OK) return AUTHZ_DENIED; if (apr_table_elts(grpstatus)->nelts == 0) { ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. " "User is not a member of any groups.", getpid(), r->user, r->uri); return AUTHZ_DENIED; } t = require_args; while ((w = ap_getword_conf(r->pool, &t)) && w[0]) { if (apr_table_get(grpstatus, w)) { return AUTHZ_GRANTED; } } ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. 
" "User is not a member of designated groups.", getpid(), r->user, r->uri); return AUTHZ_DENIED; } static const authz_provider wsgi_authz_provider = { &wsgi_check_authorization, #if MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED NULL, #endif }; #else static int wsgi_hook_auth_checker(request_rec *r) { WSGIRequestConfig *config; int m = r->method_number; const apr_array_header_t *reqs_arr; require_line *reqs; int required_group = 0; register int x; const char *t, *w; apr_table_t *grpstatus = NULL; char *reason = NULL; config = wsgi_create_req_config(r->pool, r); if (!config->auth_group_script) return DECLINED; reqs_arr = ap_requires(r); if (!reqs_arr) return DECLINED; reqs = (require_line *)reqs_arr->elts; for (x = 0; x < reqs_arr->nelts; x++) { if (!(reqs[x].method_mask & (AP_METHOD_BIT << m))) { continue; } t = reqs[x].requirement; w = ap_getword_white(r->pool, &t); if (!strcasecmp(w, "group") || !strcasecmp(w, "wsgi-group")) { required_group = 1; if (!grpstatus) { int status; status = wsgi_groups_for_user(r, config, &grpstatus); if (status != OK) return status; if (apr_table_elts(grpstatus)->nelts == 0) { reason = "User is not a member of any groups"; break; } } while (t[0]) { w = ap_getword_conf(r->pool, &t); if (apr_table_get(grpstatus, w)) { return OK; } } } } if (!required_group || !config->group_authoritative) return DECLINED; ap_log_rerror(APLOG_MARK, WSGI_LOG_ERR(0), r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. %s.", getpid(), r->user, r->uri, reason ? reason : "User is not " "a member of designated groups"); ap_note_auth_failure(r); return HTTP_UNAUTHORIZED; } #endif #endif APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *wsgi_logio_add_bytes_out; static void ap_logio_add_bytes_out(conn_rec *c, apr_off_t bytes) { if (!wsgi_daemon_pool && wsgi_logio_add_bytes_out) wsgi_logio_add_bytes_out(c, bytes); } static int wsgi_hook_logio(apr_pool_t *pconf, apr_pool_t *ptemp, apr_pool_t *plog, server_rec *s) { /* * This horrible fiddle is to insert a proxy function before * the normal ap_logio_add_bytes_out() function so that the * call to it can be disabled when mod_wsgi running in daemon * mode. If this is not done, then daemon process will crash * when mod_logio has been loaded. 
*/ wsgi_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out); APR_REGISTER_OPTIONAL_FN(ap_logio_add_bytes_out); return OK; } static void wsgi_register_hooks(apr_pool_t *p) { static const char * const p1[] = { "mod_alias.c", NULL }; static const char * const n1[]= { "mod_userdir.c", "mod_vhost_alias.c", NULL }; static const char * const n2[] = { "core.c", NULL }; #if defined(MOD_WSGI_WITH_AAA_HANDLERS) #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static const char * const p3[] = { "mod_auth.c", NULL }; #endif #if !defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) static const char * const n4[] = { "mod_authz_user.c", NULL }; #endif static const char * const n5[] = { "mod_authz_host.c", NULL }; #endif static const char * const p6[] = { "mod_python.c", NULL }; static const char * const p7[] = { "mod_ssl.c", NULL }; ap_hook_post_config(wsgi_hook_init, p6, NULL, APR_HOOK_MIDDLE); ap_hook_child_init(wsgi_hook_child_init, p6, NULL, APR_HOOK_MIDDLE); ap_hook_translate_name(wsgi_hook_intercept, p1, n1, APR_HOOK_MIDDLE); ap_hook_handler(wsgi_hook_handler, NULL, NULL, APR_HOOK_MIDDLE); #if defined(MOD_WSGI_WITH_DAEMONS) ap_hook_post_config(wsgi_hook_logio, NULL, n2, APR_HOOK_REALLY_FIRST); wsgi_header_filter_handle = ap_register_output_filter("WSGI_HEADER", wsgi_header_filter, NULL, AP_FTYPE_PROTOCOL); #endif #if defined(MOD_WSGI_WITH_AAA_HANDLERS) #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) ap_hook_check_user_id(wsgi_hook_check_user_id, p3, NULL, APR_HOOK_MIDDLE); #else ap_register_provider(p, AUTHN_PROVIDER_GROUP, "wsgi", AUTHN_PROVIDER_VERSION, &wsgi_authn_provider); #endif #if !defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) ap_hook_auth_checker(wsgi_hook_auth_checker, NULL, n4, APR_HOOK_MIDDLE); #else ap_register_provider(p, AUTHZ_PROVIDER_GROUP, "wsgi-group", AUTHZ_PROVIDER_VERSION, &wsgi_authz_provider); #endif ap_hook_access_checker(wsgi_hook_access_checker, p7, n5, APR_HOOK_MIDDLE); #endif } static const command_rec wsgi_commands[] = { AP_INIT_RAW_ARGS("WSGIScriptAlias", wsgi_add_script_alias, NULL, RSRC_CONF, "Map location to target WSGI script file."), AP_INIT_RAW_ARGS("WSGIScriptAliasMatch", wsgi_add_script_alias, "*", RSRC_CONF, "Map location pattern to target WSGI script file."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_RAW_ARGS("WSGIDaemonProcess", wsgi_add_daemon_process, NULL, RSRC_CONF, "Specify details of daemon processes to start."), AP_INIT_TAKE1("WSGISocketPrefix", wsgi_set_socket_prefix, NULL, RSRC_CONF, "Path prefix for the daemon process sockets."), AP_INIT_TAKE1("WSGIAcceptMutex", wsgi_set_accept_mutex, NULL, RSRC_CONF, "Set accept mutex type for daemon processes."), AP_INIT_TAKE1("WSGILazyInitialization", wsgi_set_lazy_initialization, NULL, RSRC_CONF, "Enable/Disable lazy Python initialization."), #endif AP_INIT_TAKE1("WSGIVerboseDebugging", wsgi_set_verbose_debugging, NULL, RSRC_CONF, "Enable/Disable verbose debugging messages."), #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 AP_INIT_TAKE1("WSGIPy3kWarningFlag", wsgi_set_py3k_warning_flag, NULL, RSRC_CONF, "Enable/Disable Python 3.0 warnings."), AP_INIT_TAKE1("WSGIDontWriteBytecode", wsgi_set_dont_write_bytecode, NULL, RSRC_CONF, "Enable/Disable writing of byte code."), #endif AP_INIT_TAKE1("WSGIPythonWarnings", wsgi_add_python_warnings, NULL, RSRC_CONF, "Control Python warning messages."), AP_INIT_TAKE1("WSGIPythonOptimize", wsgi_set_python_optimize, NULL, RSRC_CONF, "Set level of Python compiler optimisations."), AP_INIT_TAKE1("WSGIPythonHome", wsgi_set_python_home, NULL, RSRC_CONF, "Python prefix/exec_prefix absolute 
path names."), AP_INIT_TAKE1("WSGIPythonPath", wsgi_set_python_path, NULL, RSRC_CONF, "Python module search path."), AP_INIT_TAKE1("WSGIPythonEggs", wsgi_set_python_eggs, NULL, RSRC_CONF, "Python eggs cache directory."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_TAKE1("WSGIRestrictEmbedded", wsgi_set_restrict_embedded, NULL, RSRC_CONF, "Enable/Disable use of embedded mode."), #endif AP_INIT_TAKE1("WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL, RSRC_CONF, "Enable/Disable restrictions on use of STDIN."), AP_INIT_TAKE1("WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL, RSRC_CONF, "Enable/Disable restrictions on use of STDOUT."), AP_INIT_TAKE1("WSGIRestrictSignal", wsgi_set_restrict_signal, NULL, RSRC_CONF, "Enable/Disable restrictions on use of signal()."), AP_INIT_TAKE1("WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL, RSRC_CONF, "Define whether file system is case sensitive."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_RAW_ARGS("WSGIRestrictProcess", wsgi_set_restrict_process, NULL, ACCESS_CONF|RSRC_CONF, "Limit selectable WSGI process groups."), AP_INIT_TAKE1("WSGIProcessGroup", wsgi_set_process_group, NULL, ACCESS_CONF|RSRC_CONF, "Name of the WSGI process group."), #endif AP_INIT_TAKE1("WSGIApplicationGroup", wsgi_set_application_group, NULL, ACCESS_CONF|RSRC_CONF, "Application interpreter group."), AP_INIT_TAKE1("WSGICallableObject", wsgi_set_callable_object, NULL, OR_FILEINFO, "Name of entry point in WSGI script file."), AP_INIT_RAW_ARGS("WSGIImportScript", wsgi_add_import_script, NULL, RSRC_CONF, "Location of WSGI import script."), AP_INIT_RAW_ARGS("WSGIDispatchScript", wsgi_set_dispatch_script, NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI dispatch script."), AP_INIT_TAKE1("WSGIPassApacheRequest", wsgi_set_pass_apache_request, NULL, ACCESS_CONF|RSRC_CONF, "Enable/Disable Apache request object."), AP_INIT_TAKE1("WSGIPassAuthorization", wsgi_set_pass_authorization, NULL, OR_FILEINFO, "Enable/Disable WSGI authorization."), AP_INIT_TAKE1("WSGIScriptReloading", wsgi_set_script_reloading, NULL, OR_FILEINFO, "Enable/Disable script reloading mechanism."), AP_INIT_TAKE1("WSGIErrorOverride", wsgi_set_error_override, NULL, OR_FILEINFO, "Enable/Disable overriding of error pages."), AP_INIT_TAKE1("WSGIChunkedRequest", wsgi_set_chunked_request, NULL, OR_FILEINFO, "Enable/Disable support for chunked requests."), #ifndef WIN32 #if AP_SERVER_MAJORVERSION_NUMBER >= 2 AP_INIT_TAKE1("WSGIEnableSendfile", wsgi_set_enable_sendfile, NULL, OR_FILEINFO, "Enable/Disable support for kernel sendfile."), #endif #endif #if defined(MOD_WSGI_WITH_AAA_HANDLERS) AP_INIT_RAW_ARGS("WSGIAccessScript", wsgi_set_access_script, NULL, OR_AUTHCFG, "Location of WSGI host access script file."), AP_INIT_RAW_ARGS("WSGIAuthUserScript", wsgi_set_auth_user_script, NULL, OR_AUTHCFG, "Location of WSGI user auth script file."), AP_INIT_RAW_ARGS("WSGIAuthGroupScript", wsgi_set_auth_group_script, NULL, OR_AUTHCFG, "Location of WSGI group auth script file."), #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) AP_INIT_TAKE1("WSGIUserAuthoritative", wsgi_set_user_authoritative, NULL, OR_AUTHCFG, "Enable/Disable as being authoritative on users."), #endif AP_INIT_TAKE1("WSGIGroupAuthoritative", wsgi_set_group_authoritative, NULL, OR_AUTHCFG, "Enable/Disable as being authoritative on groups."), #endif AP_INIT_RAW_ARGS("WSGIHandlerScript", wsgi_add_handler_script, NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI handler script file."), { NULL } }; /* Dispatch list for API hooks */ module AP_MODULE_DECLARE_DATA wsgi_module = { 
STANDARD20_MODULE_STUFF, wsgi_create_dir_config, /* create per-dir config structures */ wsgi_merge_dir_config, /* merge per-dir config structures */ wsgi_create_server_config, /* create per-server config structures */ wsgi_merge_server_config, /* merge per-server config structures */ wsgi_commands, /* table of config file commands */ wsgi_register_hooks /* register hooks */ }; #endif mod_wsgi-3.4/posix-ap1X.mk.in000644 000765 000024 00000002253 12015317464 016506 0ustar00grahamstaff000000 000000 # Copyright 2007 GRAHAM DUMPLETON # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. APXS = @APXS@ PYTHON = @PYTHON@ DESTDIR = LIBEXECDIR = @LIBEXECDIR@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ LDFLAGS = @LDFLAGS@ LDLIBS = @LDLIBS@ all : mod_wsgi.so mod_wsgi.so : mod_wsgi.c $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS) $(DESTDIR)$(LIBEXECDIR) : mkdir -p $@ install : all $(DESTDIR)$(LIBEXECDIR) $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' mod_wsgi.so clean : -rm -f mod_wsgi.o mod_wsgi.so -rm -f config.log config.status -rm -rf autom4te.cache distclean : clean -rm -f Makefile Makefile.in realclean : distclean -rm -f configure mod_wsgi-3.4/posix-ap2X.mk.in000644 000765 000024 00000002337 12015317464 016512 0ustar00grahamstaff000000 000000 # Copyright 2007 GRAHAM DUMPLETON # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. APXS = @APXS@ PYTHON = @PYTHON@ DESTDIR = LIBEXECDIR = @LIBEXECDIR@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ LDFLAGS = @LDFLAGS@ LDLIBS = @LDLIBS@ all : mod_wsgi.la mod_wsgi.la : mod_wsgi.c $(APXS) -c $(CPPFLAGS) $(CFLAGS) mod_wsgi.c $(LDFLAGS) $(LDLIBS) $(DESTDIR)$(LIBEXECDIR) : mkdir -p $@ install : all $(DESTDIR)$(LIBEXECDIR) $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' mod_wsgi.la clean : -rm -rf .libs -rm -f mod_wsgi.o mod_wsgi.la mod_wsgi.lo mod_wsgi.slo mod_wsgi.loT -rm -f config.log config.status -rm -rf autom4te.cache distclean : clean -rm -f Makefile Makefile.in realclean : distclean -rm -f configure mod_wsgi-3.4/README000644 000765 000024 00000043217 12015317601 014455 0ustar00grahamstaff000000 000000 =================== Welcome to MOD_WSGI =================== Copyright 2007-2012 GRAHAM DUMPLETON The mod_wsgi adapter is an Apache module that provides a WSGI compliant interface for hosting Python based web applications within Apache. The adapter is written completely in C code against the Apache C runtime and for hosting WSGI applications within Apache has a lower overhead than using existing WSGI adapters for mod_python or CGI. 
The package can be compiled for and used with either Apache 1.3, 2.0 or 2.2. On UNIX systems, either the single threaded 'prefork' or multithreaded 'worker' Apache MPMs can be used.

Note that 'daemon' mode of mod_wsgi is only available if Apache 2.0 or 2.2 is being used on a UNIX platform and where the Apache run time library has been compiled with support for multithreading. Generally multithreading support is standard except for on some BSD variants.

Whatever version of Python is used, it must also have been compiled with support for multithreading. To avoid a measure of memory bloat with your Apache processes, Python should also have been compiled with shared library support enabled. A number of Python binary packages for Linux systems are not compiled with shared library support enabled. You should therefore consider recompiling Python from source code with shared library support enabled. If a shared library is not used, you will have problems trying to use mod_wsgi on a server where mod_python is also being loaded.

If using a Python binary package for a Linux system, also ensure that you have the corresponding 'dev' package installed for the Python package you have installed. Without this package you will be missing the Python header files and configuration files needed to build mod_wsgi. You should also ensure you have the corresponding 'dev' package for the Apache web server package you are using.

At this time only build scripts for UNIX systems are supplied. The code should still work on Windows, but we are still waiting for someone to volunteer to help develop and test some build scripts for Windows and ensure that mod_wsgi works on that platform.

The source code in this package is made available under the terms of the Apache Licence, Version 2.0. See the "LICENCE" file for more information.

Donations
=========

A significant amount of time and effort has gone into producing this software, the associated documentation on the web site and in providing support to users. So, although you are free to download it and do with it as you wish, it is not free from cost.

If you are using mod_wsgi, please consider making a donation. By giving a donation you help to ensure that mod_wsgi will continue to be developed and the documentation further improved. For details on how you can contribute back to mod_wsgi, see:

    http://code.google.com/p/modwsgi/wiki/HowToContributeBack

If you are philosophically against the idea of contributing back to Open Source projects in a monetary way and find it obnoxious that any Open Source project would solicit donations, then it is suggested that you use one of the alternatives to mod_wsgi such as CGI.

Documentation
=============

Note that this README only covers installation and configuration for a basic WSGI application in 'embedded' mode.
For a more detailed explanation of how to use mod_wsgi, consult the documentation found at:

    http://www.modwsgi.org

In particular, refer to the installation and configuration information in the following documentation:

    http://code.google.com/p/modwsgi/wiki/InstallationInstructions
    http://code.google.com/p/modwsgi/wiki/ConfigurationGuidelines

If you experience any sort of problems in installing and getting mod_wsgi to work, or your WSGI application appears to not work properly, ensure you also read:

    http://code.google.com/p/modwsgi/wiki/InstallationIssues
    http://code.google.com/p/modwsgi/wiki/ConfigurationIssues
    http://code.google.com/p/modwsgi/wiki/ApplicationIssues
    http://code.google.com/p/modwsgi/wiki/DebuggingTechniques

If you have questions specifically about mod_wsgi and its configuration and use, use the Google discussion group at:

    http://groups.google.com/group/modwsgi

In the first instance it is highly recommended that you come to the mod_wsgi group even when sorting out problems related to running some third party package on top of mod_wsgi. This is because people on other support forums are not necessarily going to have the knowledge to help you.

If your questions are perhaps more about WSGI in general, you can also use the Python Web-SIG or comp.lang.python USENET groups to get further information:

    http://groups.google.com/group/python-web-sig
    http://groups.google.com/group/comp.lang.python

Installation (UNIX)
===================

To set up the package ready for building run the "configure" script.

    ./configure

The configure script will attempt to identify the Apache installation to use by searching for either the "apxs2" or "apxs" tools included with your Apache installation. Similarly, which Python installation to use will be determined by looking for the "python" executable.

If using a Linux system and you do not have the appropriate Apache "dev" package installed, then "apxs" will not be able to be found when "configure" is run. Also ensure that you have installed the correct "dev" package for the version of Apache being used. For most Linux distributions, the "dev" package for Apache 2 is "apache2-dev" where the corresponding Apache package was "apache2". Some systems however distinguish the "dev" package based on which MPM is used by Apache. As such, it may also be called "apache2-worker-dev" or "apache2-prefork-dev". If using Apache 2, do not mix things up and install "apache-dev" by mistake, which is the "dev" package for Apache 1.3 called just "apache".

If these programs are not in a standard location, they cannot be found in your PATH, or you wish to use alternate versions to those found, the "--with-apxs" and "--with-python" options can be used in conjunction with the "configure" script.

    ./configure --with-apxs=/usr/local/apache/bin/apxs \
      --with-python=/usr/local/bin/python

If using SUSE Linux, it will be necessary to use the "--with-apxs" option and specify either "/usr/sbin/apxs2-worker" or "/usr/sbin/apxs2-prefork". This is necessary as SUSE Linux allows installation of the "dev" package for both Apache MPM variants at the same time, whereas other Linux distributions do not. Although mod_wsgi when compiled isn't specific to either the worker or prefork MPMs, it does include the MPM specific header file to access information about the default listener socket locking mechanism as used by the multiprocess server configuration. Thus, if one of the MPM specific versions of "apxs2" is not used, compilation will fail due to not being able to find the "mpm.h" header file.
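For example, on a SUSE system where the prefork variant of Apache and its "dev" package are installed, the sort of invocation required would be:

    ./configure --with-apxs=/usr/sbin/apxs2-prefork

Substitute "/usr/sbin/apxs2-worker" if the worker variant is the one actually installed.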
Note that any of the major Apache versions should be able to be used, i.e., all of Apache 1.3, 2.0 and 2.2 should be compatible with this package. You will however need to compile the package separately against each version and use the resultant Apache module only with the version it was compiled for. Whichever version of Apache is used however, it must support dynamic loading of Apache modules.

If you have multiple versions of Python installed and you are not using that which is the default, you may have to organise that the PATH inherited by the Apache application when run will result in Apache finding the alternate version. Alternatively, the WSGIPythonHome directive should be used to specify the exact location of the Python installation corresponding to the version of Python compiled against. If this is not done, the version of Python running within Apache may attempt to use the Python modules from the wrong version of Python.

Also note that the Apache module will be bound to the specific major/minor version of Python being used. If you ever upgrade to a newer version of Python, you will need to rebuild the mod_wsgi module.

Once the package has been configured, it can be built by running:

    make

The only product of the build process that needs to be installed is the Apache module itself. There are no separate Python code files as everything is done within C code compiled into the Apache module.

To install the Apache module into the standard location for Apache modules as dictated by Apache for your installation, run:

    make install

Installation should be done as the root user if appropriate.

If you want to install the Apache module in a non standard location dictated by how your operating system distribution structures the configuration files and modules for Apache, you will need to copy the file manually into place. If you are using Apache 1.3 the compiled Apache module can be found in the same directory as this "README" file and is called "mod_wsgi.so". If you are using Apache 2.X the compiled Apache module can be found in the ".libs" subdirectory and is again called "mod_wsgi.so". The name of the file should be kept the same when copied into its appropriate location.

To clean up after installation, run:

    make clean

If you need to build the module for a different version of Apache, you should run:

    make distclean

and then rerun "configure" against the alternate version of Apache before attempting to run "make" again.

Apache Configuration
====================

Once the Apache module has been installed into your Apache installation's module directory, it is still necessary to configure Apache to actually load the module. Exactly how this is done and in which of the main Apache configuration files it should be placed is dependent on which version of Apache you are using and may also be influenced by how your operating system's Apache distribution has organised the Apache configuration files. You may therefore need to check with any documentation for your operating system to see in what way the procedure may need to be modified.

In the simplest case, all that is required is to add a line of the form:

    LoadModule wsgi_module modules/mod_wsgi.so

into the main Apache "httpd.conf" configuration file at the same point that other Apache modules are being loaded. The last option to the directive should either be an absolute path to where the mod_wsgi module file is located, or a path expressed relative to the root of your Apache installation.
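As an illustration only, if the module had been copied into a hypothetical directory of "/usr/local/apache/modules", the equivalent directive using an absolute path would be along the lines of:

    LoadModule wsgi_module /usr/local/apache/modules/mod_wsgi.so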
If you used "make" to install the package, see where it copied the file to work out what to set this value to. With Apache 1.3, it would also be necessary to add a line of the form: AddModule mod_wsgi.c For Apache 1.3, the LoadModule and AddModule lines related to mod_wsgi must appear prior to those for the Apache code module called mod_alias. If you wish to use mod_python at the same time as mod_wsgi, then mod_python must be compiled against the same version of Python. In addition to the same version of Python being used, you should use a version of Python which has been configured and compiled so as to generate a shared library for Python. If you do not do this and either mod_python or mod_wsgi are compiled against a static library for Python, it is likely that either mod_python or mod_wsgi will crash Apache when used. Note that this is not the fault of either mod_python or mod_wsgi but arises purely because your Python installation isn't using a shared library for the Python library. The result of such a configuration means that there are actually two copies of the Python static library objects in memory at the same time and this can cause problems. Linux distributions where this is known to be a problem are any of the RedHat derived distributions. Other distributions such as Ubuntu do not have a problem as they use a shared library for the Python library. Having adding the required directives you should perform a restart of Apache to check everything is okay. apachectl restart If you see any sort of problem, or if you are upgrading from an older version of mod_wsgi, it is recommended you actually stop/start Apache instead. apachectl stop apachectl start Note that on some Linux systems using 'apachectl' may not work properly. On these systems, if you have problems using 'apachectl' ensure instead you use the init.d script for Apache instead. /etc/init.d/httpd stop /etc/init.d/httpd start The init.d script may be called 'apache2'. Similarly, the 'apachectl' script may even be called 'apachectl2' on some systems where they allow both Apache 1.3 and Apache 2.X to be installed at the same time. In general, for any system where you are using a prepackaged version of Apache, it is wise to always check the documentation for that package or system to determine the correct way to stop/start the Apache service. This is because they often use a wrapper around 'apachectl', or replace it, with a script which performs additional actions. If all is okay, you should see a line of the form: Apache/2.2.2 (Unix) mod_wsgi/1.0 Python/2.3.5 configured in the Apache error log file. If Apache is configured to also load mod_python, it would instead be: Apache/2.2.2 (Unix) mod_python/3.3.1 Python/2.3.5 mod_wsgi/1.0 configured That "Python" is listed before "mod_wsgi" is indicative of the fact that when both modules are being loaded, mod_wsgi will leave it up to mod_python to initialise Python. Note that mod_wsgi logs various detailed information about interpreter creation, script loading and reloading etc, but it logs with log level of 'info'. As the default for the Apache LogLevel directive is usually 'warn', such information will not be displayed in the Apache error log file. If you are new to mod_wsgi or need to debug issues with its use, it is recommended to change the Apache LogLevel directive to 'info' so that the information is displayed. 
Enabling WSGI Application
=========================

The mechanisms used to configure mod_wsgi are similar to those used by the Apache mod_cgi module to enable traditional CGI scripts. For example, whereas mod_cgi has the ScriptAlias directive, mod_wsgi has the WSGIScriptAlias directive. As with mod_cgi, this directive can only appear in the main Apache configuration files. The directive would be used at server scope or within a VirtualHost container; it cannot be used within the Location, Directory or Files container directives, nor within a ".htaccess" file.

The first way of using the WSGIScriptAlias directive to indicate the WSGI application to be used is to associate a WSGI application with a specific URL prefix.

    WSGIScriptAlias /myapp /usr/local/wsgi/scripts/myapp.wsgi

The last option to the directive in this case must be a full pathname to the actual code file containing the WSGI application. The WSGI application contained within the code file should be called "application". For example:

    def application(environ, start_response):
        status = '200 OK'
        output = 'Hello World!'

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        return [output]

Note that an absolute pathname must be used. It is not possible to specify an application by Python module name alone. A full path is used for a number of reasons, the main one being so that all the Apache access controls can still be applied to indicate who can actually access the WSGI application.

Because these access controls will apply, if the WSGI application is located outside of any directories already known to Apache, it will be necessary to tell Apache that files within that directory can be used. To do this the Directory directive must be used.

    <Directory /usr/local/wsgi/scripts>
    Order allow,deny
    Allow from all
    </Directory>

The second way of using the WSGIScriptAlias directive is to use it to map to a directory containing any number of WSGI applications.

    WSGIScriptAlias /wsgi/ /usr/local/wsgi/scripts/

When this is used, the next part of the URL after the URL prefix is used to identify which WSGI application code file within the target directory should be used.

Note that by default each application is placed into its own distinct application group. This means that each application will be given its own distinct Python sub interpreter to run code within. Although this means that applications will be isolated and cannot interfere with the Python code components of each other, each will load its own copy of all the Python modules it requires into memory. If you have many applications and they use a lot of different Python modules, this can result in large process sizes.

To avoid large process sizes, if you know that the applications within a directory can safely coexist and run together within the same Python sub interpreter, you can specify that all applications within a certain context should be placed in the same application group. This is indicated by using the WSGIApplicationGroup directive. The argument to the directive can be any unique name of your choosing.

    <Directory /usr/local/wsgi/scripts>
    WSGIApplicationGroup admin-scripts
    Order allow,deny
    Allow from all
    </Directory>
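To make the directory form of the mapping concrete, consider a hypothetical second script named "admin.wsgi" (the name is purely an example) placed into "/usr/local/wsgi/scripts". With the directory form of WSGIScriptAlias shown above it would be served at the URL path "/wsgi/admin.wsgi", and with the WSGIApplicationGroup directive in place it would share the same Python sub interpreter as the other scripts in that directory. Like any other WSGI script file, it must define a callable named "application":

    # /usr/local/wsgi/scripts/admin.wsgi (hypothetical example script)
    def application(environ, start_response):
        status = '200 OK'
        output = 'Admin application.'

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        return [output]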
The above only begins to describe the different ways in which mod_wsgi can be enabled for use. It does not cover running an application in 'daemon' mode, nor issues such as reloading of application script files, additional means of managing Python sub interpreters, or how to supply configuration information to WSGI applications. Thus, please ensure you refer to the documentation on the mod_wsgi web site for further information.

Enjoy

Graham Dumpleton

mod_wsgi-3.4/win32-ap22py26.mk000644 000765 000024 00000001613 12015317464 016354 0ustar00grahamstaff000000 000000
# nmake makefile: builds mod_wsgi.so against Apache 2.2 and Python 2.6 using
# the Visual Studio 9.0 compiler.

CPPFLAGS = \
 /DWIN32 \
 /DNDEBUG \
 /I"c:\Program Files\Microsoft Visual Studio 9.0\VC\include" \
 /I"c:\Program Files\Microsoft SDKs\Windows\v6.0A\Include" \
 /I"c:\Program Files\Apache Software Foundation\Apache2.2\include" \
 /I"c:\Python26\include"

CFLAGS = \
 /MD \
 /GF \
 /Gy \
 /O2 \
 /Wall \
 /Zc:wchar_t \
 /Zc:forScope

LDFLAGS = \
 /link \
 "/LIBPATH:c:\Program Files\Microsoft Visual Studio 9.0\VC\lib" \
 "/LIBPATH:c:\Program Files\Microsoft SDKs\Windows\v6.0A\Lib" \
 "/LIBPATH:c:\Program Files\Apache Software Foundation\Apache2.2\lib" \
 /LIBPATH:c:\Python26\libs \
 /OPT:REF \
 /OPT:ICF=2 \
 /RELEASE \
 /SUBSYSTEM:WINDOWS

LDLIBS = \
 python26.lib \
 libhttpd.lib \
 libapr-1.lib \
 libaprutil-1.lib

mod_wsgi.so : mod_wsgi.c
    cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
    mt -manifest $@.manifest -outputresource:$@;2

clean :
    del *.obj *.so *.so.manifest *.lib *.exp

mod_wsgi-3.4/win32-ap22py27.mk000644 000765 000024 00000001613 12015317464 016355 0ustar00grahamstaff000000 000000
# nmake makefile: builds mod_wsgi.so against Apache 2.2 and Python 2.7 using
# the Visual Studio 9.0 compiler.

CPPFLAGS = \
 /DWIN32 \
 /DNDEBUG \
 /I"c:\Program Files\Microsoft Visual Studio 9.0\VC\include" \
 /I"c:\Program Files\Microsoft SDKs\Windows\v6.0A\Include" \
 /I"c:\Program Files\Apache Software Foundation\Apache2.2\include" \
 /I"c:\Python27\include"

CFLAGS = \
 /MD \
 /GF \
 /Gy \
 /O2 \
 /Wall \
 /Zc:wchar_t \
 /Zc:forScope

LDFLAGS = \
 /link \
 "/LIBPATH:c:\Program Files\Microsoft Visual Studio 9.0\VC\lib" \
 "/LIBPATH:c:\Program Files\Microsoft SDKs\Windows\v6.0A\Lib" \
 "/LIBPATH:c:\Program Files\Apache Software Foundation\Apache2.2\lib" \
 /LIBPATH:c:\Python27\libs \
 /OPT:REF \
 /OPT:ICF=2 \
 /RELEASE \
 /SUBSYSTEM:WINDOWS

LDLIBS = \
 python27.lib \
 libhttpd.lib \
 libapr-1.lib \
 libaprutil-1.lib

mod_wsgi.so : mod_wsgi.c
    cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
    mt -manifest $@.manifest -outputresource:$@;2

clean :
    del *.obj *.so *.so.manifest *.lib *.exp

mod_wsgi-3.4/win32-ap22py31.mk000644 000765 000024 00000001613 12015317464 016350 0ustar00grahamstaff000000 000000
# nmake makefile: builds mod_wsgi.so against Apache 2.2 and Python 3.1 using
# the Visual Studio 9.0 compiler.

CPPFLAGS = \
 /DWIN32 \
 /DNDEBUG \
 /I"c:\Program Files\Microsoft Visual Studio 9.0\VC\include" \
 /I"c:\Program Files\Microsoft SDKs\Windows\v6.0A\Include" \
 /I"c:\Program Files\Apache Software Foundation\Apache2.2\include" \
 /I"c:\Python31\include"

CFLAGS = \
 /MD \
 /GF \
 /Gy \
 /O2 \
 /Wall \
 /Zc:wchar_t \
 /Zc:forScope

LDFLAGS = \
 /link \
 "/LIBPATH:c:\Program Files\Microsoft Visual Studio 9.0\VC\lib" \
 "/LIBPATH:c:\Program Files\Microsoft SDKs\Windows\v6.0A\Lib" \
 "/LIBPATH:c:\Program Files\Apache Software Foundation\Apache2.2\lib" \
 /LIBPATH:c:\Python31\libs \
 /OPT:REF \
 /OPT:ICF=2 \
 /RELEASE \
 /SUBSYSTEM:WINDOWS

LDLIBS = \
 python31.lib \
 libhttpd.lib \
 libapr-1.lib \
 libaprutil-1.lib

mod_wsgi.so : mod_wsgi.c
    cl $(CPPFLAGS) $(CFLAGS) $? /LD $(LDFLAGS) $(LDLIBS) /OUT:$@
    mt -manifest $@.manifest -outputresource:$@;2

clean :
    del *.obj *.so *.so.manifest *.lib *.exp
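These Windows makefiles are separate from the Unix "configure"/"make" process described above and are intended to be run with Microsoft's "nmake" from a Visual Studio command prompt, in the directory containing "mod_wsgi.c". As a rough sketch only, where the choice of makefile and the destination path are assumptions you would need to verify against your own Apache and Python installations, building against Apache 2.2 and Python 2.7 and copying the result into the Apache modules directory might look like:

    rem Assumed makefile and Apache install path; adjust for your system.
    nmake /f win32-ap22py27.mk
    copy mod_wsgi.so "c:\Program Files\Apache Software Foundation\Apache2.2\modules"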