pax_global_header00006660000000000000000000000064145263607470014530gustar00rootroot0000000000000052 comment=3c417a5fda9e136ced0217f275b5cd7dd298eeab mod_wsgi-5.0.0/000077500000000000000000000000001452636074700133425ustar00rootroot00000000000000mod_wsgi-5.0.0/.github/000077500000000000000000000000001452636074700147025ustar00rootroot00000000000000mod_wsgi-5.0.0/.github/workflows/000077500000000000000000000000001452636074700167375ustar00rootroot00000000000000mod_wsgi-5.0.0/.github/workflows/main.yml000066400000000000000000000046451452636074700204170ustar00rootroot00000000000000on: push: branches: - develop tags: - "[0-9]+.[0-9]+.[0-9]+" - "[0-9]+.[0-9]+.[0-9]+rc[0-9]+" pull_request: branches: - develop jobs: build: name: "Build mod_wsgi packages" runs-on: "ubuntu-20.04" strategy: fail-fast: false matrix: python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: "actions/checkout@v4" - uses: "actions/setup-python@v4" with: python-version: "${{ matrix.python-version }}" - name: "Update package details" run: sudo apt --fix-missing update - name: "Install Apache package" run: sudo apt install -y apache2-dev - name: "Build mod_wsgi packages" run: ./package.sh && ls -las dist - name: "Store built packages" uses: actions/upload-artifact@v3 with: name: dist ${{ matrix.python-version }} path: dist/* tests: name: "Test mod_wsgi package (Python ${{ matrix.python-version }})" runs-on: "ubuntu-20.04" needs: - build strategy: fail-fast: false matrix: python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] steps: - uses: "actions/checkout@v4" - uses: "actions/setup-python@v4" with: python-version: "${{ matrix.python-version }}" - name: "Download built packages" uses: actions/download-artifact@v3 with: name: dist ${{ matrix.python-version }} path: dist - name: "Update package details" run: sudo apt --fix-missing update - name: "Install Apache package" run: sudo apt install -y apache2-dev - name: "Update pip installation" run: python -m pip install --upgrade pip setuptools wheel - name: "Install mod_wsgi" run: python -m pip install --verbose dist/mod_wsgi-[0-9].*.tar.gz - name: "Run mod_wsgi-express test #1" run: scripts/run-single-test.sh - name: "Uninstall mod_wsgi" run: pip uninstall --yes mod_wsgi - name: "Install mod_wsgi-standalone" run: python -m pip install --verbose dist/mod_wsgi-standalone-[0-9].*.tar.gz - name: "Run mod_wsgi-express test #2" run: scripts/run-single-test.sh - name: "Uninstall mod_wsgi-standalone" run: pip uninstall --yes mod_wsgi-standalone - name: "Verify configure/make/make install" run: ./configure && make && sudo make install mod_wsgi-5.0.0/.gitignore000066400000000000000000000003421452636074700153310ustar00rootroot00000000000000Makefile autom4te.cache config.log config.status .libs *.o *.la *.lo *.loT *.slo build dist apxs_config.py *.egg-info *.swp bin include lib .Python *.pyc .tox apxs libtool docs/_build newrelic.ini src/packages venv httpd-test mod_wsgi-5.0.0/.mailmap000066400000000000000000000001231452636074700147570ustar00rootroot00000000000000Graham Dumpleton Graham.Dumpleton mod_wsgi-5.0.0/.readthedocs.yaml000066400000000000000000000011011452636074700165620ustar00rootroot00000000000000# .readthedocs.yaml # Read the Docs configuration file # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the version of Python and other tools you might need build: os: ubuntu-22.04 tools: python: "3.11" # Build documentation in the docs/ directory with Sphinx sphinx: configuration: docs/conf.py # We recommend specifying 
your dependencies to enable reproducible builds: # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html # python: # install: # - requirements: docs/requirements.txt mod_wsgi-5.0.0/CREDITS.rst000066400000000000000000000004571452636074700151770ustar00rootroot00000000000000======= CREDITS ======= The mod_wsgi source code incorporates C functions for calculating memory usage from: :Author: David Robert Nadeau :Site: http://NadeauSoftware.com/ :License: Creative Commons Attribution 3.0 Unported License http://creativecommons.org/licenses/by/3.0/deed.en_US mod_wsgi-5.0.0/LICENSE000066400000000000000000000261361452636074700143570ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mod_wsgi-5.0.0/MANIFEST.in000066400000000000000000000007361452636074700151060ustar00rootroot00000000000000include configure.ac include configure include LICENSE include Makefile.in include README.rst include README-standalone.rst include CREDITS.rst include src/server/*.h include src/server/*.c exclude src/server/apxs_config.py include tests/* include docs/_build/html/* include docs/_build/html/_static/* include docs/_build/html/_static/css/* include docs/_build/html/_static/font/* include docs/_build/html/_static/js/* include docs/_build/html/_sources/* include pyproject.toml mod_wsgi-5.0.0/Makefile.in000066400000000000000000000027761452636074700154230ustar00rootroot00000000000000# Copyright 2007-2011 GRAHAM DUMPLETON # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. APXS = @APXS@ PYTHON = @PYTHON@ DESTDIR = @DESTDIR@ LIBEXECDIR = @LIBEXECDIR@ CPPFLAGS = @CPPFLAGS@ CFLAGS = @CFLAGS@ LDFLAGS = @LDFLAGS@ LDLIBS = @LDLIBS@ SRCFILES = src/server/mod_wsgi.c src/server/wsgi_*.c all : src/server/mod_wsgi.la src/server/mod_wsgi.la : $(SRCFILES) $(APXS) -c $(CPPFLAGS) $(CFLAGS) $(SRCFILES) $(LDFLAGS) $(LDLIBS) $(DESTDIR)$(LIBEXECDIR) : mkdir -p $@ install : all $(DESTDIR)$(LIBEXECDIR) $(APXS) -i -S LIBEXECDIR=$(DESTDIR)$(LIBEXECDIR) -n 'mod_wsgi' src/server/mod_wsgi.la clean : -rm -rf src/server/.libs -rm -f src/server/*.o -rm -f src/server/*.la -rm -f src/server/*.lo -rm -f src/server/*.slo -rm -f src/server/*.loT -rm -f config.log -rm -f config.status -rm -rf autom4te.cache -rm -rf mod_wsgi.egg-info -rm -rf build -rm -rf dist distclean : clean -rm -f Makefile -rm -f apxs libtool -rm -rf .Python bin lib include -rm -rf .tox realclean : distclean -rm -f configure mod_wsgi-5.0.0/README-standalone.rst000066400000000000000000000040321452636074700171560ustar00rootroot00000000000000Overview -------- The mod_wsgi package provides an Apache module that implements a WSGI compliant interface for hosting Python based web applications on top of the Apache web server. The primary package for mod_wsgi is available on the Python package index (PyPi) as ``mod_wsgi``. 
That package assumes that you have a suitable version of Apache pre-installed on your target system, and if you don't, installation of the package will fail. If you are on a UNIX-like system (Linux) and need a version of Apache to be installed for you, you can use the ``mod_wsgi-standalone`` package on PyPi instead. When installing the ``mod_wsgi-standalone`` package, it will first trigger the installation of the ``mod_wsgi-httpd`` package, which will result in a version of Apache being installed as part of your Python installation. Next, the ``mod_wsgi`` package will be installed, using the version of Apache installed by the ``mod_wsgi-httpd`` package rather than any system package for Apache. This method of installation is only suitable where you want to use ``mod_wsgi-express``. It cannot be used to build mod_wsgi for use with your system Apache installation. This installation method will not work on Windows, and also currently fails on macOS because the Apache Portable Runtime (APR) library has not been updated for the latest macOS versions. When installing mod_wsgi using this method, you should follow the installation and usage instructions outlined on the PyPi page for the ``mod_wsgi`` package, except that you will install the ``mod_wsgi-standalone`` package instead of the ``mod_wsgi`` package. **NOTE: Although this package may allow you to install a standalone Apache version, it is only really recommended that you use this package if you have absolutely no other choice for getting the Apache httpd server installed. Always use the Apache httpd server supplied with the operating system if you can. Building this package, if you do choose to do so, will take some time. So if you think the install is hanging, it is probably still busy compiling everything.** mod_wsgi-5.0.0/README.rst000066400000000000000000000437601452636074700150420ustar00rootroot00000000000000Overview -------- The mod_wsgi package provides an Apache module that implements a WSGI compliant interface for hosting Python based web applications on top of the Apache web server. *Note that the major version 5.0 was introduced not because of any new major features but because from version 5.0 onwards compatibility with Python 2.7 is no longer guaranteed. A minimum Python version of 3.8 will be enforced by the Python package installation configuration.* Installation of mod_wsgi from source code can be performed in one of two ways. The first way of installing mod_wsgi is the traditional way that has been used by many software packages. This is where it is installed as a module directly into your Apache installation using the commands ``configure``, ``make`` and ``make install``, a method sometimes referred to by the acronym CMMI. This method works with most UNIX type systems. It cannot be used on Windows. The second way of installing mod_wsgi is to install it as a Python package into your Python installation using the Python ``pip install`` command. This can be used on all platforms, including Windows. This second way of installing mod_wsgi will compile not only the Apache module for mod_wsgi, but will also install a Python module and admin script, which on UNIX type systems can be used to start up a standalone instance of Apache directly from the command line with an auto-generated configuration. This latter mechanism for installing mod_wsgi using Python ``pip`` is a much simpler way of getting started with hosting your Python web application.
In particular, this installation method makes it very easy to use Apache/mod_wsgi in a development environment without the need to perform any Apache configuration yourself. The Apache module for mod_wsgi created when using the ``pip install`` method can still be used with the main Apache installation, via manual configuration if necessary. As detailed later in these instructions, the admin script installed when you use ``pip install`` can be used to generate the configuration to manually add to the Apache configuration to load mod_wsgi. Note that although MacOS X is a UNIX type system, the ``pip install`` method is the only supported way for installing mod_wsgi. Since MacOS X Sierra, Apple has completely broken the ability to install third party Apache modules using the ``apxs`` tool normally used for this task. History suggests that Apple will never fix the problem as they have broken things in the past in other ways and workarounds were required as they never fixed those problems either. This time there is no easy workaround as they no longer supply certain tools which are required to perform the installation. The ``pip install`` method along with the manual configuration of Apache is also the method you need to use on Windows. System Requirements ------------------- With either installation method for mod_wsgi, you must have Apache installed. This must be a complete Apache installation. It is not enough to have only the runtime packages for Apache installed. You must have the corresponding development package for Apache installed, which contains the Apache header files, as these are required to be able to compile and install third party Apache modules. Similarly with Python, you must have a complete Python installation which includes the corresponding development package, which contains the header files for the Python library. If you are running Debian or Ubuntu Linux with Apache 2.2 system packages, and were using the Apache prefork MPM you would need both: * apache2-mpm-prefork * apache2-prefork-dev If instead you were using the Apache worker MPM, you would need both: * apache2-mpm-worker * apache2-threaded-dev If you are running Debian or Ubuntu Linux with Apache 2.4 system packages, regardless of which Apache MPM is being used, you would need both: * apache2 * apache2-dev If you are running RHEL, CentOS or Fedora, you would need both: * httpd * httpd-devel If you are using the Software Collections Library (SCL) packages with RHEL, CentOS or Fedora, you would need: * httpd24 * httpd24-httpd-devel If you are running MacOS X, Apache is supplied with the operating system. If running a recent MacOS X version, you will though need to have the Xcode command line tools installed as well as the Xcode application. The command line tools can be installed by running ``xcode-select --install``. The Xcode application can be installed from the MacOS X App Store. If you are using older MacOS X versions, you may be able to get away with having just the command line tools. If you are running Windows, it is recommended you use the Apache distribution from Apache Lounge (www.apachelounge.com). Other Apache distributions for Windows aren't always complete and are missing the files needed to compile additional Apache modules. By default, it is expected that Apache is installed in the directory ``C:/Apache24`` on Windows. 
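As an illustrative sketch only (the exact commands depend on your distribution, and the ``python3-dev``/``python3-devel`` package names for the Python development headers are assumptions that depend on how Python was installed), the packages listed above could be installed on Debian or Ubuntu with Apache 2.4 using::

    sudo apt install -y apache2 apache2-dev python3-dev

and on RHEL, CentOS or Fedora using::

    sudo yum install -y httpd httpd-devel python3-devel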
If you are on Linux, macOS or other UNIX type operating system and can't or don't want to use the system package for Apache, you can use ``pip`` to install mod_wsgi, but you should use the ``mod_wsgi-standalone`` package on PyPi instead of the ``mod_wsgi`` package. Installation into Apache ------------------------ For installation directly into your Apache installation using the CMMI method, see the full documentation at: * http://www.modwsgi.org/ Alternatively, use the following instructions to install mod_wsgi into your Python installation and then either copy the mod_wsgi module into your Apache installation, or configure Apache to use the mod_wsgi module from the Python installation. When using this approach, you will still need to manually configure Apache to have mod_wsgi loaded into Apache, and for it to know about your WSGI application. Installation into Python ------------------------ To install mod_wsgi directly into your Python installation, from within the source directory of the mod_wsgi package you can run:: python setup.py install This will compile mod_wsgi and install the resulting package into your Python installation. If wishing to install an official release direct from the Python Package Index (PyPi), you can instead run:: pip install mod_wsgi If you wish to use a version of Apache which is installed into a non standard location, you can set and export the ``APXS`` environment variable to the location of the Apache ``apxs`` script for your Apache installation before performing the installation. If you are using Linux, macOS or other UNIX type operating system, and you don't have Apache available, you can instead install mod_wsgi using:: pip install mod_wsgi-standalone When installing ``mod_wsgi-standalone``, it will also install a version of Apache into your Python distribution. You can only use ``mod_wsgi-express`` when using this variant of the package. The ``mod_wsgi-standalone`` package follows the same version numbering as the ``mod_wsgi`` package on PyPi. If you are on Windows and your Apache distribution is not installed into the directory ``C:/Apache24``, first set the environment variable ``MOD_WSGI_APACHE_ROOTDIR`` to the directory containing the Apache distribution. Ensure you use forward slashes in the directory path. The directory path should not include path components with spaces in the name. Note that nothing will be copied into your Apache installation at this point. As a result, you do not need to run this as the root user unless installing it into a site wide Python installation rather than a Python virtual environment. It is recommended you always use Python virtual environments and never install any Python package directly into the system Python installation. On a UNIX type system, to verify that the installation was successful, run the ``mod_wsgi-express`` script with the ``start-server`` command:: mod_wsgi-express start-server This will start up Apache/mod_wsgi on port 8000. You can then verify that the installation worked by pointing your browser at:: http://localhost:8000/ When started in this way, the Apache web server will stay in the foreground. To stop the Apache server, use CTRL-C. For a simple WSGI application contained in a WSGI script file called ``wsgi.py``, in the current directory, you can now run:: mod_wsgi-express start-server wsgi.py This instance of the Apache web server will be completely independent of, and will not interfere with, any existing instance of Apache you may have running on port 80.
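If you do not yet have a WSGI script file to test with, a minimal ``wsgi.py`` might look like the following (a sketch only; by default mod_wsgi looks for a callable named ``application`` in the WSGI script file)::

    def application(environ, start_response):
        # Minimal WSGI application returning a plain text response.
        status = '200 OK'
        output = b'Hello World!'
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]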
If you already have another web server running on port 8000, you can override the port to be used using the ``--port`` option:: mod_wsgi-express start-server wsgi.py --port 8080 For a complete list of options you can run:: mod_wsgi-express start-server --help For further information related to using ``mod_wsgi-express`` see the main mod_wsgi documentation. Non standard Apache installations --------------------------------- Many Linux distributions have a tendency to screw around with the standard Apache Software Foundation layout for installation of Apache. This can include renaming the Apache ``httpd`` executable to something else, and in addition to potentially renaming it, replacing the original binary with a shell script which performs additional actions which can only be performed as the ``root`` user. In the case of the ``httpd`` executable simply being renamed, the executable will obviously not be found and ``mod_wsgi-express`` will fail to start at all. In this case you should work out what the ``httpd`` executable was renamed to and use the ``--httpd-executable`` option to specify its real location. For example, if ``httpd`` was renamed to ``apache2``, you might need to use:: mod_wsgi-express start-server wsgi.py --httpd-executable=/usr/sbin/apache2 In the case of the ``httpd`` executable being replaced with a shell script which performs additional actions before then executing the original ``httpd`` executable, and the shell script is failing in some way, you will need to use the location of the original ``httpd`` executable the shell script is in turn executing. Running mod_wsgi-express as root -------------------------------- The primary intention of ``mod_wsgi-express`` is to make it easier for users to run up Apache on non privileged ports, especially during the development of a Python web application. If you want to be able to run Apache using ``mod_wsgi-express`` on a privileged port such as the standard port 80 used by HTTP servers, then you will need to run ``mod_wsgi-express`` as root. In doing this, you will need to perform additional steps. The first thing you must do is supply the ``--user`` and ``--group`` options to say what user and group your Python web application should run as. Most Linux distributions will predefine a special user for Apache to run as, so you can use that. Alternatively you can use any other special user account you have created for running the Python web application:: mod_wsgi-express start-server wsgi.py --port=80 \ --user www-data --group www-data This approach to running ``mod_wsgi-express`` will be fine so long as you are using a process supervisor which expects the process being run to remain in the foreground and not daemonize. If however you are directly integrating into the system init scripts where separate start and stop commands are expected, with the executing process expected to be daemonized, then a different process is required to setup ``mod_wsgi-express``. In this case, instead of simply using the ``start-server`` command to ``mod_wsgi-express`` you should use ``setup-server``:: mod_wsgi-express setup-server wsgi.py --port=80 \ --user www-data --group www-data \ --server-root=/etc/mod_wsgi-express-80 In running this command, it will not actually startup Apache. All it will do is create the set of configuration files and the startup script to be run. So that these are not created in the default location of a directory under ``/tmp``, you should use the ``--server-root`` option to specify where they should be placed. 
Having created the configuration and startup script, to start the Apache instance you can now run:: /etc/mod_wsgi-express-80/apachectl start To subsequently stop the Apache instance you can run:: /etc/mod_wsgi-express-80/apachectl stop You can also restart the Apache instance as necessary using:: /etc/mod_wsgi-express-80/apachectl restart Using this approach, the original options you supplied to ``setup-server`` will be cached with the same configuration used each time. If you need to update the set of options, run ``setup-server`` again with the new set of options. Note that even taking all these steps, it is possible that running up Apache as ``root`` using ``mod_wsgi-express`` may fail on systems where SELinux extensions are enabled. This is because the SELinux profile may not match what is being expected for the way that Apache is being started, or alternatively, the locations that Apache has been specified as being allowed to access, don't match where the directory specified using the ``--server-root`` directory was placed. You may therefore need to configure SELinux or move the directory used with ``--server-root`` to an allowed location. In all cases, any error messages will be logged to a file under the server root directory. If you are using ``mod_wsgi-express`` with a process supervisor, or in a container, where log messages are expected to be sent to the terminal, you can use the ``--log-to-terminal`` option. Using mod_wsgi-express with Django ---------------------------------- To use ``mod_wsgi-express`` with Django, after having installed the mod_wsgi package into your Python installation, edit your Django settings module and add ``mod_wsgi.server`` to the list of installed apps. :: INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'mod_wsgi.server', ) To prepare for running ``mod_wsgi-express``, ensure that you first collect up any Django static file assets into the directory specified for them in the Django settings file:: python manage.py collectstatic You can now run the Apache server with mod_wsgi hosting your Django application by running:: python manage.py runmodwsgi If working in a development environment and you would like to have any code changes automatically reloaded, then you can use the ``--reload-on-changes`` option. :: python manage.py runmodwsgi --reload-on-changes If wanting to have Apache started as root in order to listen on port 80, instead of using ``mod_wsgi-express setup-server`` as described above, use the ``--setup-only`` option to the ``runmodwsgi`` management command. :: python manage.py runmodwsgi --setup-only --port=80 \ --user www-data --group www-data \ --server-root=/etc/mod_wsgi-express-80 This will set up all the required files and you can use ``apachectl`` to start and stop the Apache instance as explained previously. Connecting into Apache installation ----------------------------------- If you want to use mod_wsgi in combination with your system Apache installation, the CMMI method for installing mod_wsgi would normally be used. If you are on MacOS X Sierra that is no longer possible. Even prior to MacOS X Sierra, the System Integrity Protection (SIP) system of MacOS X, prevented installing the mod_wsgi module into the Apache modules directory. If you are using Windows, the CMMI method was never supported as Windows doesn't supply the required tools to make it work. 
The CMMI installation method also involves a bit more work as you need to separately download the mod_wsgi source code, run the ``configure`` tool and then run ``make`` and ``make install``. The alternative to using the CMMI installation method is to use the Apache mod_wsgi module created by running ``pip install``. This can be directly referenced from the Apache configuration, or copied into the Apache modules directory. To use the Apache mod_wsgi module from where ``pip install`` placed it, run the command ``mod_wsgi-express module-config``. This will output something like:: LoadModule wsgi_module /usr/local/lib/python2.7/site-packages/mod_wsgi/server/mod_wsgi-py27.so WSGIPythonHome /usr/local/lib These are the directives needed to configure Apache to load the mod_wsgi module and tell mod_wsgi where the Python installation directory or virtual environment was located. This would be placed in the Apache ``httpd.conf`` file, or if using a Linux distribution which separates out module configuration into a ``mods-available`` directory, in the ``wsgi.load`` file within the ``mods-available`` directory. In the latter case where a ``mods-available`` directory is used, the module would then be enabled by running ``a2enmod wsgi`` as ``root``. If necessary Apache can then be restarted to verify the module is loading correctly. You can then configure Apache as necessary for your specific WSGI application. Note that because in this scenario the mod_wsgi module for Apache could be located in a Python virtual environment, if you destroy the Python virtual environment the module will also be deleted. In that case you would need to ensure you recreate the Python virtual environment and reinstall the mod_wsgi package using ``pip``, or, take out the mod_wsgi configuration from Apache before restarting Apache, else it will fail to startup. Instead of referencing the mod_wsgi module from the Python installation, you can instead copy the mod_wsgi module into the Apache installation. To do that, run the ``mod_wsgi-express install-module`` command, running it as ``root`` if necessary. This will output something like:: LoadModule wsgi_module modules/mod_wsgi-py27.so WSGIPythonHome /usr/local/lib This is similar to above except that the mod_wsgi module was copied to the Apache modules directory first and the ``LoadModule`` directive references it from that location. You should take these lines and configure Apache in the same way as described above. Do note that copying the module like this will not work on recent versions of MacOS X due to the SIP feature of MacOS X. mod_wsgi-5.0.0/configure000077500000000000000000003535011452636074700152600ustar00rootroot00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. 
alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. 
BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. 
BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. ## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. 
if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME= PACKAGE_TARNAME= PACKAGE_VERSION= PACKAGE_STRING= PACKAGE_BUGREPORT= PACKAGE_URL= ac_unique_file="src/server/mod_wsgi.c" ac_subst_vars='LTLIBOBJS LIBOBJS LIBEXECDIR DESTDIR LDLIBS PYTHON OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC APXS target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_framework enable_embedded with_apxs with_python ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. 
if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. 
with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. 
with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; 
-sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. 
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures this package to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. 
Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/PACKAGE] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF _ACEOF fi if test -n "$ac_init_help"; then cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-framework enable mod_wsgi framework link --disable-embedded disable mod_wsgi embedded mode Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-apxs=NAME name of the apxs executable [[apxs]] --with-python=NAME name of the python executable [[python]] Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF configure generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Check whether --enable-framework was given. if test "${enable_framework+set}" = set; then : enableval=$enable_framework; ENABLE_FRAMEWORK=$enableval else ENABLE_FRAMEWORK=false fi # Check whether --enable-embedded was given. if test "${enable_embedded+set}" = set; then : enableval=$enable_embedded; ENABLE_EMBEDDED=$enableval else ENABLE_EMBEDDED=yes fi # Check whether --with-apxs was given. if test "${with_apxs+set}" = set; then : withval=$with_apxs; APXS="$with_apxs" fi if test -z "${APXS}"; then for ac_prog in apxs2 apxs do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_APXS+:} false; then : $as_echo_n "(cached) " >&6 else case $APXS in [\\/]* | ?:[\\/]*) ac_cv_path_APXS="$APXS" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/local/apache/bin:/usr/sbin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_APXS="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi APXS=$ac_cv_path_APXS if test -n "$APXS"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $APXS" >&5 $as_echo "$APXS" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$APXS" && break done test -n "$APXS" || APXS="apxs" fi XCODE_PREFIX="/Applications/Xcode.app/Contents/Developer/Toolchains/" XCODE_CC="${XCODE_PREFIX}XcodeDefault.xctoolchain/usr/bin/cc" XCODE_BIN_PATTERN="${XCODE_PREFIX}.*/usr/bin/" if test -x "${APXS}"; then APXS_CC=`${APXS} -q CC` APXS_LIBTOOL=`${APXS} -q LIBTOOL | sed -e "s/ .*$//"` case ${APXS_CC} in ${XCODE_PREFIX}*) if test ! -x ${XCODE_CC}; then cat "${APXS_LIBTOOL}" | sed -e \ "s%${XCODE_BIN_PATTERN}%/usr/bin/%" > ./libtool cat "${APXS}" | sed -e "s%get_vars(\"CC\")%\"/usr/bin/cc\"%" \ -e 's%^my $libtool = .*;%my $libtool = \"./libtool\";%' > ./apxs else cat "${APXS_LIBTOOL}" | sed -e \ "s%OSX10.[0-9][0-9]*.xctoolchain%XcodeDefault.xctoolchain%" > ./libtool cat "${APXS}" | sed -e "s%get_vars(\"CC\")%\"${XCODE_CC}\"%" \ -e 's%^my $libtool = .*;%my $libtool = \"./libtool\";%' > ./apxs fi chmod +x ./apxs ./libtool APXS=./apxs ;; *) esac else as_fn_error $? "Apache tool 'apxs' or 'apxs2' is required to build mod_wsgi." "$LINENO" 5 fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. 
for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. 
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu for ac_func in prctl do : ac_fn_c_check_func "$LINENO" "prctl" "ac_cv_func_prctl" if test "x$ac_cv_func_prctl" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_PRCTL 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking Apache version" >&5 $as_echo_n "checking Apache version... " >&6; } HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`" if test -x ${HTTPD}; then HTTPD_VERSION=`${HTTPD} -v | awk '/version/ {print $3}' \ | awk -F/ '{print $2}'` else if test -f ${HTTPD_INCLUDEDIR}/ap_release.h; then HTTPD_VERSION=`grep '^#define AP_SERVER_MAJORVERSION_NUMBER ' \ ${HTTPD_INCLUDEDIR}/ap_release.h | sed -e \ 's/^#define AP_SERVER_MAJORVERSION_NUMBER \([0-9]\).*/\1.X/'` else if test -f ${HTTPD_INCLUDEDIR}/httpd.h; then HTTPD_VERSION=`grep '^#define APACHE_RELEASE ' \ ${HTTPD_INCLUDEDIR}/httpd.h | sed -e \ 's/^#define APACHE_RELEASE \([0-9]\).*/\1.X/'` else HTTPD_VERSION="2.?" fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $HTTPD_VERSION" >&5 $as_echo "$HTTPD_VERSION" >&6; } # Check whether --with-python was given. if test "${with_python+set}" = set; then : withval=$with_python; PYTHON="$with_python" fi if test -z "${PYTHON}"; then for ac_prog in python do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_PYTHON+:} false; then : $as_echo_n "(cached) " >&6 else case $PYTHON in [\\/]* | ?:[\\/]*) ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_dummy="$PATH:/usr/local/bin" for as_dir in $as_dummy do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi PYTHON=$ac_cv_path_PYTHON if test -n "$PYTHON"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 $as_echo "$PYTHON" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$PYTHON" && break done test -n "$PYTHON" || PYTHON="python" fi PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("VERSION"))'` PYTHON_LDVERSION=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LDVERSION") or "")'` if test x"${PYTHON_LDVERSION}" = x""; then PYTHON_LDVERSION=${PYTHON_VERSION} fi CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write("-I" + sysconfig.get_config_var("INCLUDEPY"))'` CPPFLAGS2=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(" ".join(filter(lambda x: x.startswith("-D"), \ sysconfig.get_config_var("CFLAGS").split())))'` if test "${ENABLE_EMBEDDED}" != "yes"; then CPPFLAGS3="-DMOD_WSGI_DISABLE_EMBEDDED" else CPPFLAGS3="" fi CPPFLAGS="${CPPFLAGS} ${CPPFLAGS1} ${CPPFLAGS2} ${CPPFLAGS3}" PYTHONLIBDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBDIR"))'` PYTHONCFGDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_path("platstdlib") +"/config")'` PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` PYTHONFRAMEWORKPREFIX=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` if test "${PYTHON_LDVERSION}" != "${PYTHON_VERSION}"; then PYTHONCFGDIR="${PYTHONCFGDIR}-${PYTHON_LDVERSION}" fi if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \ "${ENABLE_FRAMEWORK}" != "yes"; then LDFLAGS1="-L${PYTHONLIBDIR}" LDFLAGS2="-L${PYTHONCFGDIR}" LDLIBS1="-lpython${PYTHON_LDVERSION}" # MacOS X seems to be broken and doesn't use ABIFLAGS suffix # so add a check to try and work out what we need to do. 
if test -f "${PYTHONLIBDIR}/libpython${PYTHON_VERSION}.a"; then LDLIBS1="-lpython${PYTHON_VERSION}" fi if test -f "${PYTHONCFGDIR}/libpython${PYTHON_VERSION}.a"; then LDLIBS1="-lpython${PYTHON_VERSION}" fi LDLIBS2=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` LDLIBS3=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("SYSLIBS"))'` else LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" VERSION="${PYTHON_VERSION}" STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}" LDFLAGS2=`${PYTHON} -c "from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var( \"LINKFORSHARED\").replace(\"${STRING}\", ''))" | \ sed -e 's/-Wl,-stack_size,[0-9]*//'` LDLIBS1=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` fi CFLAGS1="" for arg in ${CFLAGS} do CFLAGS1="${CFLAGS1} -Wc,$arg" done CFLAGS2="" if test -x /usr/bin/lipo; then LDFLAGS3="" ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` for ARCH in $ARCHITECTURES; do CFLAGS2="${CFLAGS2} -Wc,'-arch ${ARCH}'" LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" done fi CFLAGS="${CFLAGS1} ${CFLAGS2}" LDFLAGS="${LDFLAGS} ${LDFLAGS1} ${LDFLAGS2} ${LDFLAGS3}" LDLIBS="${LDLIBS} ${LDLIBS1} ${LDLIBS2} ${LDLIBS3}" LIBEXECDIR="`${APXS} -q LIBEXECDIR`" HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'` ac_config_files="$ac_config_files Makefile" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. 
sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' # Transform confdefs.h into DEFS. # Protect against shell expansion while executing Makefile rules. # Protect against Makefile macro expansion. # # If the first sed substitution is executed (which looks for macros that # take arguments), then branch to the quote section. Otherwise, # look for a macro that doesn't take arguments. ac_script=' :mline /\\$/{ N s,\\\n,, b mline } t clear :clear s/^[ ]*#[ ]*define[ ][ ]*\([^ (][^ (]*([^)]*)\)[ ]*\(.*\)/-D\1=\2/g t quote s/^[ ]*#[ ]*define[ ][ ]*\([^ ][^ ]*\)[ ]*\(.*\)/-D\1=\2/g t quote b any :quote s/[ `~#$^&*(){}\\|;'\''"<>?]/\\&/g s/\[/\\&/g s/\]/\\&/g s/\$/$$/g H :any ${ g s/^\n// s/\n/ /g p } ' DEFS=`sed -n "$ac_script" confdefs.h` ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. 
as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. 
as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. 
as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. ## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by $as_me, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE Configuration files: $config_files Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ config.status configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
ac_pwd='$ac_pwd' srcdir='$srcdir' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? "missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --he | --h | --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? 
"cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" eval set X " :F $CONFIG_FILES " shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. 
$ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi mod_wsgi-5.0.0/configure.ac000066400000000000000000000171731452636074700156410ustar00rootroot00000000000000dnl vim: set sw=4 expandtab : dnl dnl Copyright 2007-2023 GRAHAM DUMPLETON dnl dnl Licensed under the Apache License, Version 2.0 (the "License"); dnl you may not use this file except in compliance with the License. dnl You may obtain a copy of the License at dnl dnl http://www.apache.org/licenses/LICENSE-2.0 dnl dnl Unless required by applicable law or agreed to in writing, software dnl distributed under the License is distributed on an "AS IS" BASIS, dnl WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. dnl See the License for the specific language governing permissions and dnl limitations under the License. dnl Process this file with autoconf to produce a configure script. AC_INIT(src/server/mod_wsgi.c) AC_ARG_ENABLE(framework, AC_HELP_STRING([--enable-framework], [enable mod_wsgi framework link]), [ENABLE_FRAMEWORK=$enableval], [ENABLE_FRAMEWORK=false]) AC_ARG_ENABLE(embedded, AC_HELP_STRING([--disable-embedded], [disable mod_wsgi embedded mode]), [ENABLE_EMBEDDED=$enableval], [ENABLE_EMBEDDED=yes]) AC_ARG_WITH(apxs, AC_HELP_STRING([--with-apxs=NAME], [name of the apxs executable [[apxs]]]), [APXS="$with_apxs"]) if test -z "${APXS}"; then AC_PATH_PROGS(APXS, apxs2 apxs, [apxs], [$PATH:/usr/local/apache/bin:/usr/sbin]) fi XCODE_PREFIX="/Applications/Xcode.app/Contents/Developer/Toolchains/" XCODE_CC="${XCODE_PREFIX}XcodeDefault.xctoolchain/usr/bin/cc" XCODE_BIN_PATTERN="${XCODE_PREFIX}.*/usr/bin/" if test -x "${APXS}"; then APXS_CC=`${APXS} -q CC` APXS_LIBTOOL=`${APXS} -q LIBTOOL | sed -e "s/ .*$//"` APXS_LIBDIR=`${APXS} -q LIBDIR | sed -e "s/ .*$//"` case ${APXS_CC} in ${XCODE_PREFIX}*) if test ! -x ${XCODE_CC}; then cat "${APXS_LIBTOOL}" | sed -e \ "s%${XCODE_BIN_PATTERN}%/usr/bin/%" > ./libtool cat "${APXS}" | sed -e "s%get_vars(\"CC\")%\"/usr/bin/cc\"%" \ -e 's%^my $libtool = .*;%my $libtool = \"./libtool\";%' > ./apxs else cat "${APXS_LIBTOOL}" | sed -e \ "s%OSX10.[[0-9]][[0-9]]*.xctoolchain%XcodeDefault.xctoolchain%" > ./libtool cat "${APXS}" | sed -e "s%get_vars(\"CC\")%\"${XCODE_CC}\"%" \ -e 's%^my $libtool = .*;%my $libtool = \"./libtool\";%' > ./apxs fi chmod +x ./apxs ./libtool APXS=./apxs ;; *) esac else AC_MSG_ERROR([Apache tool 'apxs' or 'apxs2' is required to build mod_wsgi.]) fi AC_SUBST(APXS) AC_CHECK_FUNCS(prctl) AC_MSG_CHECKING(Apache version) HTTPD="`${APXS} -q SBINDIR`/`${APXS} -q TARGET`" HTTPD_INCLUDEDIR="`${APXS} -q INCLUDEDIR`" if test -x ${HTTPD}; then HTTPD_VERSION=`${HTTPD} -v | awk '/version/ {print $3}' \ | awk -F/ '{print $2}'` else if test -f ${HTTPD_INCLUDEDIR}/ap_release.h; then HTTPD_VERSION=`grep '^#define AP_SERVER_MAJORVERSION_NUMBER ' \ ${HTTPD_INCLUDEDIR}/ap_release.h | sed -e \ 's/^#define AP_SERVER_MAJORVERSION_NUMBER \([0-9]\).*/\1.X/'` else if test -f ${HTTPD_INCLUDEDIR}/httpd.h; then HTTPD_VERSION=`grep '^#define APACHE_RELEASE ' \ ${HTTPD_INCLUDEDIR}/httpd.h | sed -e \ 's/^#define APACHE_RELEASE \([0-9]\).*/\1.X/'` else HTTPD_VERSION="2.?" 
fi fi fi AC_MSG_RESULT($HTTPD_VERSION) AC_ARG_WITH(python, AC_HELP_STRING([--with-python=NAME], [name of the python executable [[python]]]), [PYTHON="$with_python"]) if test -z "${PYTHON}"; then AC_PATH_PROGS(PYTHON, python, [python], [$PATH:/usr/local/bin]) fi AC_SUBST(PYTHON) PYTHON_VERSION=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("VERSION"))'` PYTHON_LDVERSION=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LDVERSION") or "")'` if test x"${PYTHON_LDVERSION}" = x""; then PYTHON_LDVERSION=${PYTHON_VERSION} fi CPPFLAGS1=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write("-I" + sysconfig.get_config_var("INCLUDEPY"))'` CPPFLAGS2=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(" ".join(filter(lambda x: x.startswith("-D"), \ sysconfig.get_config_var("CFLAGS").split())))'` if test "${ENABLE_EMBEDDED}" != "yes"; then CPPFLAGS3="-DMOD_WSGI_DISABLE_EMBEDDED" else CPPFLAGS3="" fi CPPFLAGS="${CPPFLAGS} ${CPPFLAGS1} ${CPPFLAGS2} ${CPPFLAGS3}" AC_SUBST(CPPFLAGS) PYTHONLIBDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBDIR"))'` PYTHONCFGDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_path("platstdlib") +"/config")'` PYTHONFRAMEWORKDIR=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKDIR"))'` PYTHONFRAMEWORKPREFIX=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORKPREFIX"))'` PYTHONFRAMEWORK=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("PYTHONFRAMEWORK"))'` if test "${PYTHON_LDVERSION}" != "${PYTHON_VERSION}"; then PYTHONCFGDIR="${PYTHONCFGDIR}-${PYTHON_LDVERSION}" fi if test "${PYTHONFRAMEWORKDIR}" = "no-framework" -o \ "${ENABLE_FRAMEWORK}" != "yes"; then if test "${PYTHONLIBDIR}" != "${APXS_LIBDIR}" ; then LDFLAGS1="-L${PYTHONLIBDIR}" fi if test "${PYTHONCFGDIR}" != "${APXS_LIBDIR}" ; then LDFLAGS2="-L${PYTHONCFGDIR}" fi LDLIBS1="-lpython${PYTHON_LDVERSION}" # MacOS X seems to be broken and doesn't use ABIFLAGS suffix # so add a check to try and work out what we need to do. 
if test -f "${PYTHONLIBDIR}/libpython${PYTHON_VERSION}.a"; then LDLIBS1="-lpython${PYTHON_VERSION}" fi if test -f "${PYTHONCFGDIR}/libpython${PYTHON_VERSION}.a"; then LDLIBS1="-lpython${PYTHON_VERSION}" fi LDLIBS2=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` LDLIBS3=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("SYSLIBS"))'` else LDFLAGS1="-Wl,-F${PYTHONFRAMEWORKPREFIX} -framework ${PYTHONFRAMEWORK}" VERSION="${PYTHON_VERSION}" STRING="${PYTHONFRAMEWORKDIR}/Versions/${VERSION}/${PYTHONFRAMEWORK}" LDFLAGS2=`${PYTHON} -c "from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var( \"LINKFORSHARED\").replace(\"${STRING}\", ''))" | \ sed -e 's/-Wl,-stack_size,[[0-9]]*//'` LDLIBS1=`${PYTHON} -c 'from sys import stdout; \ import sysconfig; \ stdout.write(sysconfig.get_config_var("LIBS"))'` fi CFLAGS1="" for arg in ${CFLAGS} do CFLAGS1="${CFLAGS1} -Wc,$arg" done CFLAGS2="" if test -x /usr/bin/lipo; then LDFLAGS3="" ARCHITECTURES=`/usr/bin/lipo -info $HTTPD | sed -e 's/.*://'` for ARCH in $ARCHITECTURES; do CFLAGS2="${CFLAGS2} -Wc,'-arch ${ARCH}'" LDFLAGS3="${LDFLAGS3} -arch ${ARCH}" done fi CFLAGS="${CFLAGS1} ${CFLAGS2}" LDFLAGS="${LDFLAGS} ${LDFLAGS1} ${LDFLAGS2} ${LDFLAGS3}" LDLIBS="${LDLIBS} ${LDLIBS1} ${LDLIBS2} ${LDLIBS3}" AC_SUBST(CFLAGS) AC_SUBST(LDFLAGS) AC_SUBST(LDLIBS) AC_SUBST(DESTDIR) LIBEXECDIR="`${APXS} -q LIBEXECDIR`" AC_SUBST(LIBEXECDIR) HTTPD_MAJOR_VERSION=`echo ${HTTPD_VERSION} | sed -e 's/\..*//'` AC_OUTPUT(Makefile) mod_wsgi-5.0.0/docs/000077500000000000000000000000001452636074700142725ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/Makefile000066400000000000000000000152561452636074700157430ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* mkdir $(BUILDDIR)/html touch $(BUILDDIR)/html/__init__.py html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/mod_wsgi.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/mod_wsgi.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/mod_wsgi" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/mod_wsgi" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." 
$(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." mod_wsgi-5.0.0/docs/_build/000077500000000000000000000000001452636074700155305ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_build/html/000077500000000000000000000000001452636074700164745ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_build/html/__init__.py000066400000000000000000000000001452636074700205730ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_static/000077500000000000000000000000001452636074700157205ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_static/.gitkeep000066400000000000000000000000001452636074700173370ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_templates/000077500000000000000000000000001452636074700164275ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/_templates/.gitkeep000066400000000000000000000000001452636074700200460ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/conf.py000066400000000000000000000205071452636074700155750ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # mod_wsgi documentation build configuration file, created by # sphinx-quickstart on Mon Mar 17 14:27:37 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'mod_wsgi' copyright = u'2007-2023, Graham Dumpleton' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. import re def _version_info(): path = '../src/server/wsgi_version.h' pattern = r'#define MOD_WSGI_VERSION_STRING "(?P[^"]*)"' with open(path, 'r') as fp: match = re.search(pattern, fp.read(), flags=re.MULTILINE) return match.group('version').split('.') version_info = _version_info() # The short X.Y version. version = '.'.join(version_info[:2]) # The full version, including alpha/beta/rc tags. release = '.'.join(version_info) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: html_theme = 'default' else: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". 
#html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'mod_wsgidoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'mod_wsgi.tex', u'mod\\_wsgi Documentation', u'Graham Dumpleton', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'mod_wsgi', u'mod_wsgi Documentation', [u'Graham Dumpleton'], 1) ] # If true, show URL addresses after external links. 
#man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'mod_wsgi', u'mod_wsgi Documentation', u'Graham Dumpleton', 'mod_wsgi', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False mod_wsgi-5.0.0/docs/configuration-directives/000077500000000000000000000000001452636074700213005ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/configuration-directives/WSGIAcceptMutex.rst000066400000000000000000000016141452636074700247500ustar00rootroot00000000000000=============== WSGIAcceptMutex =============== :Description: Specify type of accept mutex used by daemon processes. :Syntax: ``WSGIAcceptMutex Default`` | *method* :Default: ``WSGIAcceptMutex Default`` :Context: server config The ``WSGIAcceptMutex`` directive sets the method that mod_wsgi will use to serialize multiple daemon processes in a process group accepting requests on a socket connection from the Apache child processes. If this directive is not defined then the same type of mutex mechanism as used by Apache for the main Apache child processes when accepting connections from a client will be used. If set the method types are the same as for the Apache `AcceptMutex`_ directive. Note that the ``WSGIAcceptMutex`` directive and corresponding features are not available on Windows or when running Apache 1.3. .. _AcceptMutex: http://httpd.apache.org/docs/2.4/mod/mpm_common.html#acceptmutex mod_wsgi-5.0.0/docs/configuration-directives/WSGIAccessScript.rst000066400000000000000000000022501452636074700251110ustar00rootroot00000000000000================ WSGIAccessScript ================ :Description: Specify script implementing host access controls. :Syntax: ``WSGIAccessScript`` *path* [ *options* ] :Context: directory, .htaccess :Override: AuthConfig The ``WSGIAccessScript`` directive provides a mechanism for implementing host access controls. More detailed information on using the ``WSGIAccessScript`` directive can be found in :doc:`../user-guides/access-control-mechanisms`. The options which can be supplied to the ``WSGIAccessScript`` directive are: **application-group=name** Specifies the name of the application group within the specified process for which the script file will be loaded. If the ``application-group`` option is not supplied, the special value ``%{GLOBAL}`` which denotes that the script file be loaded within the context of the first interpreter created by Python when it is initialised will be used. Otherwise, will be loaded into the interpreter for the specified application group. Note that the script always runs in processes associated with embedded mode. It is not possible to delegate the script such that it is run within context of a daemon process. mod_wsgi-5.0.0/docs/configuration-directives/WSGIApplicationGroup.rst000066400000000000000000000122311452636074700260030ustar00rootroot00000000000000==================== WSGIApplicationGroup ==================== :Description: Sets which application group WSGI application belongs to. 
:Syntax: ``WSGIApplicationGroup name`` ``WSGIApplicationGroup %{GLOBAL}`` ``WSGIApplicationGroup %{SERVER}`` ``WSGIApplicationGroup %{RESOURCE}`` ``WSGIApplicationGroup %{ENV:variable}`` :Default: ``WSGIApplicationGroup %{RESOURCE}`` :Context: server config, virtual host, directory The ``WSGIApplicationGroup`` directive can be used to specify which application group a WSGI application or set of WSGI applications belongs to. All WSGI applications within the same application group will execute within the context of the same Python sub interpreter of the process handling the request. Setting ``WSGIApplicationGroup`` doesn't control what processes a request is handled by, that is what the ``WSGIProcessGroup`` directive does. In other words, the ``WSGIProcessGroup`` directive operates distinct from the ``WSGIApplicationGroup`` directive, with ``WSGIProcessGroup`` dictating what named group of processes a request is handled by, and ``WSGIApplicationGroup`` dictating which named Python sub interpreter context (application group) of those processes is used. In each distinct process of a named group of processes, there will be a separate sub interpreter instance of same name, for handling the requests accepted by that process. The argument to the ``WSGIApplicationGroup`` can be either one of four special expanding variables or an explicit name of your own choosing. The meaning of the special variables are: **%{GLOBAL}** The application group name will be set to the empty string. Any WSGI applications in the global application group will always be executed within the context of the first interpreter created by Python when it is initialised, of the process handling the request. Forcing a WSGI application to run within the first interpreter can be necessary when a third party C extension module for Python has used the simplified threading API for manipulation of the Python GIL and thus will not run correctly within any additional sub interpreters created by Python. **%{SERVER}** The application group name will be set to the server hostname. If the request arrived over a non standard HTTP/HTTPS port, the port number will be added as a suffix to the group name separated by a colon. For example, if the virtual host ``www.example.com`` is handling requests on the standard HTTP port (80) and HTTPS port (443), a request arriving on either port would see the application group name being set to ``www.example.com``. If instead the virtual host was handling requests on port 8080, then the application group name would be set to ``www.example.com:8080``. **%{RESOURCE}** The application group name will be set to the server hostname and port as for the ``%{SERVER}`` variable, to which the value of WSGI environment variable ``SCRIPT_NAME`` is appended separated by the file separator character. For example, if the virtual host ``www.example.com`` was handling requests on port 8080 and the URL-path which mapped to the WSGI application was:: http://www.example.com/wsgi-scripts/foo then the application group name would be set to:: www.example.com:8080|/wsgi-scripts/foo The effect of using the ``%{RESOURCE}`` variable expansion is for each application on any server to be isolated from all others by being mapped to its own Python sub interpreter. **%{ENV:variable}** The application group name will be set to the value of the named environment variable. 
The environment variable is looked-up via the internal Apache notes and subprocess environment data structures and (if not found there) via ``getenv()`` from the Apache server process. In an Apache configuration file, environment variables accessible using the ``%{ENV}`` variable reference can be setup by using directives such as `SetEnv`_ and `RewriteRule`_. For example, to group all WSGI scripts for a specific user when using `mod_userdir`_ within the same application group, the following could be used:: RewriteEngine On RewriteCond %{REQUEST_URI} ^/~([^/]+) RewriteRule . - [E=APPLICATION_GROUP:~%1] Options ExecCGI SetHandler wsgi-script WSGIApplicationGroup %{ENV:APPLICATION_GROUP} Note that in embedded mode or a multi process daemon process group, there will be an instance of the named sub interpreter in each process. Thus the directive only ensures that request is handled in the named sub interpreter within the process that handles the request. If you need to ensure that requests for a specific user always go back to the exact same sub interpreter, then you will need to use a daemon process group with only a single process, or implement sticky session mechanism across a number of single process daemon process groups. .. _SetEnv: http://httpd.apache.org/docs/2.2/mod/mod_env.html#setenv .. _RewriteRule: http://httpd.apache.org/docs/2.2/mod/mod_rewrite.html#rewriterule .. _mod_userdir: http://httpd.apache.org/docs/2.2/mod/mod_userdir.html mod_wsgi-5.0.0/docs/configuration-directives/WSGIAuthGroupScript.rst000066400000000000000000000023421452636074700256300ustar00rootroot00000000000000=================== WSGIAuthGroupScript =================== :Description: Specify script implementing group authorisation. :Syntax: ``WSGIAuthGroupScript`` *path* [ *options* ] :Context: directory, .htaccess :Override: AuthConfig The ``WSGIAuthGroupScript`` directive provides a mechanism for implementing group authorisation using the Apache ``Require`` directive. More detailed information on using the ``WSGIAuthGroupScript`` directive can be found in :doc:`../user-guides/access-control-mechanisms`. The options which can be supplied to the ``WSGIAuthGroupScript`` directive are: **application-group=name** Specifies the name of the application group within the specified process for which the script file will be loaded. If the ``application-group`` option is not supplied, the special value ``%{GLOBAL}`` which denotes that the script file be loaded within the context of the first interpreter created by Python when it is initialised will be used. Otherwise, will be loaded into the interpreter for the specified application group. Note that the script always runs in processes associated with embedded mode. It is not possible to delegate the script such that it is run within context of a daemon process. mod_wsgi-5.0.0/docs/configuration-directives/WSGIAuthUserScript.rst000066400000000000000000000032111452636074700254460ustar00rootroot00000000000000================== WSGIAuthUserScript ================== :Description: Specify script implementing an authentication provider. :Syntax: ``WSGIAuthUserScript`` *path* [ *options* ] :Context: directory, .htaccess :Override: AuthConfig The WSGIAuthUserScript directive can be used to specify a script which implements an Apache authentication provider. 
Such an authentication provider can be used where you want Apache to worry about the handshaking related to HTTP Basic and Digest authentication and you only wish to deal with supplying the user credentials for authenticating the user. If using at least Apache 2.2, other Apache modules implementing custom authentication mechanisms can also make use of the authentication provider if they are using the corresponding Apache C API for accessing them. More detailed information on using the WSGIAuthUserScript directive can be found in :doc:`../user-guides/access-control-mechanisms`. The options which can be supplied to the WSGIAuthUserScript directive are: **application-group=name** Specifies the name of the application group within the specified process for which the script file will be loaded. If the 'application-group' option is not supplied, the special value '%{GLOBAL}' which denotes that the script file be loaded within the context of the first interpreter created by Python when it is initialised will be used. Otherwise, will be loaded into the interpreter for the specified application group. Note that the script always runs in processes associated with embedded mode. It is not possible to delegate the script such that it is run within context of a daemon process. mod_wsgi-5.0.0/docs/configuration-directives/WSGICallableObject.rst000066400000000000000000000024011452636074700253470ustar00rootroot00000000000000================== WSGICallableObject ================== :Description: Sets the name of the WSGI application callable. :Syntax: ``WSGICallableObject`` *name* ``WSGICallableObject %{ENV:variable}`` :Default: ``WSGICallableObject application`` :Context: server config, virtual host, directory, .htaccess :Override: ``FileInfo`` The WSGICallableObject directive can be used to override the name of the Python callable object in the script file which is used as the entry point into the WSGI application. When ``%{ENV}`` is being used, the environment variable is looked-up via the internal Apache notes and subprocess environment data structures and (if not found there) via getenv() from the Apache server process. In an Apache configuration file, environment variables accessible using the ``%{ENV}`` variable reference can be setup by using directives such as `SetEnv`_ and `RewriteRule`_. Note that the name of the callable object must be an object present at global scope within the WSGI script file. It is not possible to use a dotted path to refer to a sub object of a module imported by the WSGI script file. .. _SetEnv: http://httpd.apache.org/docs/2.2/mod/mod_env.html#setenv .. _RewriteRule: http://httpd.apache.org/docs/2.2/mod/mod_rewrite.html#rewriterule mod_wsgi-5.0.0/docs/configuration-directives/WSGICaseSensitivity.rst000066400000000000000000000020321452636074700256470ustar00rootroot00000000000000=================== WSGICaseSensitivity =================== :Description: Define whether file system is case sensitive. :Syntax: ``WSGICaseSensitivity On|Off`` :Context: server config When mod_wsgi is used on the Windows and MacOS X platforms, it will assume that the filesystem in use is case insensitive. This is necessary to ensure that the module caching system works correctly and only one module is retained in memory where paths with different case are used to identify the same script file. On other platforms it will always be assumed that a case sensitive file system is used. 
The WSGICaseSensitivity directive can be used explicitly to specify for a particular WSGI application whether the file system the script file is stored in is case sensitive or not, thus overriding the default for any platform. A value of On indicates that the filesystem is case sensitive. Because it is set in the main server config it will apply to the whole site. All paths therefore would need to be located in a filesystem with the same case convention. mod_wsgi-5.0.0/docs/configuration-directives/WSGIChunkedRequest.rst000066400000000000000000000044241452636074700254620ustar00rootroot00000000000000================== WSGIChunkedRequest ================== :Description: Enabled support for chunked request content. :Syntax: ``WSGIChunkedRequest On|Off`` :Default: ``WSGIChunkedRequest Off`` :Context: server config, virtual host, directory, .htaccess The WSGIChunkedRequest directive can be used to enable support for chunked request content. Rather than Apache rejecting a request using chunked request content, it will be allowed to pass through. Do note however that WSGI is technically incapable of supporting chunked request content without all chunked request content having to be first read in and buffered. This is because WSGI requires ``CONTENT_LENGTH`` be set when there is any request content. In mod_wsgi no buffering is done. Thus, to be able to read the request content in the case of a chunked transfer encoding, you need to step outside of the WSGI specification and do things it says you aren't meant to. You have two choices for how you can do this. The first choice you have is to call ``read()`` on ``wsgi.input`` but not supply any argument at all. This will cause all request content to be read in and returned. The second is to loop on calling ``read()`` on ``wsgi.input`` with a set block size passed as argument and do this until ``read()`` returns an empty string. Because both calling methods are not allowed under WSGI specification, in using these, your code will not technically be portable to other WSGI hosting mechanisms, although if those other WSGI servers support it, you will be okay. That all said, although technically not permitted by the WSGI specification, some WSGI frameworks do now incoporate support for handling chunked request content, as well as where compressed request content is expanded by the web server such that ``CONTENT_LENGTH`` is no longer accurate. The required behaviour is enabled in these frameworks by the WSGI server passing through the non standard ``wsgi.input_terminated`` key set as ``True`` in the per request WSGI ``environ`` dictionary. When this is done the web frameworks will always read all available input and ignore ``CONTENT_LENGTH``. Because mod_wsgi guarantees that an empty string is returned when all input is exhausted, it will always set this flag. It is known that Flask/Werkzeug supports the ``wsgi.input_terminated`` flag. mod_wsgi-5.0.0/docs/configuration-directives/WSGIDaemonProcess.rst000066400000000000000000000774031452636074700253010ustar00rootroot00000000000000================= WSGIDaemonProcess ================= :Description: Configure a distinct daemon process for running applications. :Syntax: ``WSGIDaemonProcess`` *name* ``[`` *options* ``]`` :Context: server config, virtual host The ``WSGIDaemonProcess`` directive can be used to specify that distinct daemon processes should be created to which the running of WSGI applications can be delegated. 
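As a minimal illustrative sketch only (the daemon process group name, script path and settings shown here are hypothetical), a WSGI application might be delegated to run in its own daemon process group using::

    WSGIDaemonProcess example.com processes=2 threads=15 display-name='%{GROUP}'
    WSGIScriptAlias / /var/www/example.com/app.wsgi process-group=example.com application-group=%{GLOBAL}

The meaning of the individual options, and the alternative of delegating applications using the ``WSGIProcessGroup`` directive rather than the ``process-group`` option of ``WSGIScriptAlias``, are covered below.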
Where Apache has been started as the ``root`` user, the daemon processes can be run as a user different to that which the Apache child processes would normally be run as. When distinct daemon processes are enabled and used, the process is dedicated to mod_wsgi and the only thing that the processes do is run the WSGI applications assigned to that process group. Any other Apache modules such as PHP or activities such as serving up static files continue to be run in the standard Apache child processes. Note that having denoted that daemon processes should be created by using the ``WSGIDaemonProcess`` directive, the ``WSGIProcessGroup`` directive, or the ``process-group`` option of ``WSGIScriptAlias`` still needs to be used to delegate specific WSGI applications to execute within those daemon processes. Also note that the name of the daemon process group must be unique for the whole server. That is, it is not possible to use the same daemon process group name in different virtual hosts. Options which can be supplied to the ``WSGIDaemonProcess`` directive are: .. _processes: **processes=num** Defines the number of daemon processes that should be started in this process group. If not defined then only one process will be run in this process group. Note that if this option is defined as ``processes=1``, then the WSGI environment attribute called ``wsgi.multiprocess`` will be set to be ``True`` whereas not providing the option at all will result in the attribute being set to be ``False``. This distinction is to allow for where some form of load balancing is used across process groups in the same Apache instance, or separate Apache instances. If you need to ensure that ``wsgi.multiprocess`` is ``False`` so that interactive debuggers will work, simply do not specify the ``processes`` option and allow the default single daemon process to be created in the process group. .. _threads: **threads=num** Defines the number of threads to be created to handle requests in each daemon process within the process group. If this option is not defined then the default will be to create 15 threads in each daemon process within the process group. Do not get carried away and set this to a very large number in the belief that it will somehow magically enable you to handle many more concurrent users. Any sort of increased value would only be appropriate where your code is I/O bound. If you code is CPU bound, you are better of using at most 3 to 5 threads per process and using more processes. If you set the number of threads to 0 you will enable a special mode intended for using a daemon process to run a managed set of processes. You will need to use ``WSGIImportScript`` to pre-load a Python script into the main application group specified by ``%{GLOBAL}`` where the script runs a never ending task, or does an exec to run an external program. If the script or external program exits, the process is shutdown and replaced with a new one. For the case of using a Python script to run a never ending task, a ``SystemExit`` exception will be injected when a signal is received to shutdown the process. You can use ``signal.signal()`` to register a signal handler for ``SIGTERM`` if needing to run special actions before then exiting the process using ``sys.exit()``, or to signal your own threads to exit any processing so you can shutdown in an orderly manner. .. _display-name: **display-name=value** Defines a different name to show for the daemon process when using the ``ps`` command to list processes. 
If the value is ``%{GROUP}`` then the name will be ``(wsgi:group)`` where ``group`` is replaced with the name of the daemon process group. Note that only as many characters of the supplied value can be displayed as were originally taken up by ``argv0`` of the executing process. Anything in excess of this will be truncated. This feature may not work as described on all platforms. Typically it also requires a ``ps`` program with BSD heritage. Thus on some versions of Solaris UNIX the ``/usr/bin/ps`` program doesn't work, but ``/usr/ucb/ps`` does. Other programs which can display this value include ``htop``. .. _home: **home=directory** Defines an absolute path of a directory which should be used as the initial current working directory of the daemon processes within the process group. If this option is not defined the initial current working directory will be set to be the home directory of the user that the daemon process is configured to run as using the ``user`` option to the ``WSGIDaemonProcess`` directive. Otherwise the current working directory of Apache when started will be used, which if Apache is being started from system init scripts, would usually be the system root directory. .. _user: **user=name | user=#uid** Defines the UNIX user *name* or numeric user *uid* of the user that the daemon processes should be run as. If this option is not supplied the daemon processes will be run as the same user that Apache would run child processes, as defined by the `User`_ directive, and it is not necessary to set this to the Apache user yourself. Note that this option is ignored if Apache wasn't started as the root user, in which case no matter what the settings, the daemon processes will be run as the user that Apache was started as. Also be aware that mod_wsgi will not allow you to run a daemon process group as the root user due to the security risk of running a web application as root. .. _group: **group=name | group=#gid** Defines the UNIX group *name* or numeric group *gid* of the primary group that the daemon processes should be run as. If this option is not supplied the daemon processes will be run as the same group that Apache would run child processes, as defined by the `Group`_ directive, and it is not necessary to set this to the Apache group yourself. Note that this option is ignored if Apache wasn't started as the root user, in which case no matter what the settings, the daemon processes will be run as the group that Apache was started as. .. _supplementary-groups: **supplementary-groups=group1 | supplementary-groups=group1,group2** Defines a list of additional UNIX groups that the user the daemon process group runs as, should be added to, in addition to primary UNIX group associated with that user. When specifying more than one group, separate the names of the groups with a comma. .. _umask: **umask=0nnn** Defines a value to be used for the umask of the daemon processes within the process group. The value must be provided as an octal number. If this option is not defined then the umask of the user that Apache is initially started as will be inherited by the process. Typically the inherited umask would be '0022'. .. _lang: **lang=locale** Set the current language locale. This is the same as having set the ``LANG`` environment variable. You will need to set this on many Linux systems where Apache when started up from system init scripts uses the default C locale, meaning that the default system encoding is ASCII. 
Unless you need a special language locale, set this to ``en_US.UTF-8``. Whether the ``lang`` or ``locale`` option works best can depend on the system being used. Set both if you aren't sure which is appropriate. .. _locale: **locale=locale** Set the current language locale. This is the same as having set the ``LC_ALL`` environment variable. You will need to set this on many Linux systems where Apache when started up from system init scripts uses the default C locale, meaning that the default system encoding is ASCII. Unless you need a special language locale, set this to ``en_US.UTF-8``. Whether the ``lang`` or ``locale`` option works best can depend on the system being used. Set both if you aren't sure which is appropriate. .. _chroot: **chroot=directory** Run the daemon process group process within a chroot jail. Use of a chroot jail is now deprecated due to the difficulty in setting up a chroot environment. It is recommended that you use more modern containerisation technologies such as Docker or runC. .. _script-user: **script-user=name | script-user=#uid** Sets the user that must be the owner of any WSGI script file delegated to be run in the daemon process group. If the owner doesn't match a HTTP Forbidden response will be returned for any request. Note that this doesn't change what user the daemon process group runs as at any time. If you want to set the user that the daemon process group runs as, use the ``user`` option. Only one of ``script-user`` or ``script-group`` option can be used at the same time. .. _script-group: **script-group=name | script-group=#gid** Sets the group that must be the group of any WSGI script file delegated to be run in the daemon process group. If the group doesn't match a HTTP Forbidden response will be returned for any request. Note that this doesn't change what group the daemon process group runs as at any time. If you want to set the group that the daemon process group runs as, use the ``group`` option. Only one of ``script-user`` or ``script-group`` option can be used at the same time. .. _python-home: **python-home=directory** Set the location of the Python virtual environment to be used by the daemon processes. The directory to use is that which ``sys.prefix`` is set to for the Python virtual environment. The virtual environment can have been created by ``virtualenv``, ``pyvenv`` or ``python -m venv``. Note that the Python virtual environment must have been created using the same base Python version as was used to compile the mod_wsgi module. You can't use this to force mod_wsgi to somehow use a different Python version than it was compiled for. If you want to use a different version of Python, you will need to reinstall mod_wsgi, compiling it for the version you want. It is not possible for the one mod_wsgi instance to run applications for both Python 2 and 3 at the same time. .. _python-path: **python-path=directory | python-path=directory:directory** List of colon separated directories to add to the Python module search path, ie., ``sys.path``. Note that this is not strictly the same as having set the ``PYTHONPATH`` environment variable when running normal command line Python. When this option is used, the directories are added by calling ``site.addsitedir()``. As well as adding the directory to ``sys.path`` this function has the effect of opening and interpreting any ``.pth`` files located in the specified directories. 
If using a Python virtual environment, rather than use this option to refer to the ``site-packages`` directory of the Python virtual environment, you should use the ``python-home`` option to specify the root of the Python virtual environment instead. In all cases, if the directory contains Python packages which have C extension components, those packages must have been installed using the same base Python version as was used to compile the mod_wsgi module. You should not mix packages from different Python versions or installations. .. _python-eggs: **python-eggs=directory** Directory to be used as the Python egg cache directory. This is equivalent to having set the ``PYTHON_EGG_CACHE`` environment variable. Note that the directory specified must exist and be writable by the user that the daemon process runs as. .. _restart-interval: **restart-interval=sss** Defines a time limit in seconds for how long a daemon process should run before being restarted. This might be used to periodically force restart the WSGI application processes when you have issues related to Python object reference count cycles, or incorrect use of in memory caching, which causes constant memory growth. If this option is not defined, or is defined to be 0, then the daemon process will be persistent and will continue to service requests until Apache itself is restarted or shutdown. Avoid setting this too low. This is because the constant restarting and reloading of your WSGI application may cause unnecessary load on your system and affect performance. You can use the ``graceful-timeout`` option in conjunction with this option to reduce the chances that an active request will be interrupted when a restart occurs due to the use of this option. .. _maximum-requests: **maximum-requests=nnn** Defines a limit on the number of requests a daemon process should process before it is shutdown and restarted. This might be used to periodically force restart the WSGI application processes when you have issues related to Python object reference count cycles, or incorrect use of in memory caching, which causes constant memory growth. If this option is not defined, or is defined to be 0, then the daemon process will be persistent and will continue to service requests until Apache itself is restarted or shutdown. Avoid setting this to a low number of requests on a site which handles a lot of traffic. This is because the constant restarting and reloading of your WSGI application may cause unnecessary load on your system and affect performance. Only use this option if you have no other choice due to a memory usage issue. Stop using it as soon as any memory issue has been resolved. You can use the ``graceful-timeout`` option in conjunction with this option to reduce the chances that an active request will be interrupted when a restart occurs due to the use of this option. .. _inactivity-timeout: **inactivity-timeout=sss** Defines the maximum number of seconds allowed to pass before the daemon process is shutdown and restarted when the daemon process has entered an idle state. For the purposes of this option, being idle means there are no currently active requests and no new requests are being received. This option exists to allow infrequently used applications running in a daemon process to be restarted, thus allowing memory being used to be reclaimed, with process size dropping back to the initial startup size before any application had been loaded or requests processed.
Note that after any restart of the WSGI application process, the WSGI application will need to be reloaded. This can mean that the first request received by a process after the process was restarted can be slower. If your WSGI application has a very high startup cost on CPU and time, it may not be a good idea to use the option. See also the ``request-timeout`` option for forcing a process restart when requests block for a specified period of time. Note that similar functionality to that of the ``request-timeout`` option, for forcing a restart when requests blocked, was part of what was implemented by the ``inactivity-timeout`` option. The request timeout was broken out into a separate feature in version 4.1.0 of mod_wsgi. .. _request-timeout: **request-timeout=sss** Defines the maximum number of seconds that a request is allowed to run before the daemon process is restarted. This can be used to recover from a scenario where a request blocks indefinitely, and where, if all request threads were consumed in this way, the whole WSGI application process would be blocked. How this option is seen to behave is different depending on whether a daemon process uses only one thread, or more than one thread for handling requests, as set by the ``threads`` option. If there is only a single thread, and so the process can only handle one request at a time, as soon as the timeout has passed, a restart of the process will be initiated. If there is more than one thread, the request timeout is applied to the average running time for any requests, across all threads. This means that a request can run longer than the request timeout. This is done to reduce the possibility of interrupting other running requests, and causing a user to see a failure. So where there is still capacity to handle more requests, restarting of the process will be delayed if possible. .. _deadlock-timeout: **deadlock-timeout=sss** Defines the maximum number of seconds allowed to pass before the daemon process is shutdown and restarted after a potential deadlock on the Python GIL has been detected. The default is 300 seconds. This option exists to combat the problem of a daemon process freezing as the result of a rogue Python C extension module which doesn't properly release the Python GIL when entering into a blocking or long running operation. .. _startup-timeout: **startup-timeout=sss** Defines the maximum number of seconds allowed to pass waiting to see if a WSGI script file can be loaded successfully by a daemon process. When the timeout is passed, the process will be restarted. This can be used to force the reloading of a process when a transient issue occurs on the first attempt to load the WSGI script file, but subsequent attempts still fail because a Python package that was loaded has retained state that prevents attempts to run initialisation a second time within the same process. The Django package can cause this scenario as the initialisation of Django itself can no longer be attempted more than once in the same process. .. _graceful-timeout: **graceful-timeout=sss** When ``maximum-requests`` is used and the maximum has been reached, or ``cpu-time-limit`` is used and the CPU limit reached, or ``restart-interval`` is used and the time limit reached, if ``graceful-timeout`` is set, then the process will continue to run for the number of seconds specified by this option, while still accepting new requests, to see if the process reaches an idle state.
If the process reaches an idle state, it will then be restarted immediately. If the process doesn't reach an idle state and the graceful restart timeout expires, the process will be restarted, even if it means that requests may be interrupted. .. _eviction-timeout: **eviction-timeout=sss** When a daemon process is sent the graceful restart signal, usually ``SIGUSR1``, to restart a process, this timeout controls how many seconds the process will wait, while still accepting new requests, before it reaches an idle state with no active requests and shuts down. If this timeout is not specified, then the value of the ``graceful-timeout`` will instead be used. If the ``graceful-timeout`` is not specified, then the restart when sent the graceful restart signal will instead happen immediately, with the process being forcibly killed, if necessary, when the shutdown timeout has expired. .. _shutdown-timeout: **shutdown-timeout=sss** Defines the maximum number of seconds allowed to pass when waiting for a daemon process to shut down. When this timeout has been reached the daemon process will be forced to exit even if there are still active requests or it is still running Python exit functions. The shutdown timeout is applied after any graceful restart timeout or eviction timeout if they have been specified. No new requests are accepted while the shutdown timeout is being applied. If this option is not defined, then the shutdown timeout will be set to 5 seconds. Note that this option does not change the shutdown timeout applied to daemon processes when Apache itself is being stopped or restarted. That timeout value is defined internally to Apache as 3 seconds and cannot be overridden. .. _connect-timeout: **connect-timeout=sss** Defines the maximum amount of time for an Apache child process to wait trying to get a successful connection to the mod_wsgi daemon processes. This defaults to 15 seconds. .. _socket-timeout: **socket-timeout=sss** Defines the timeout on individual reads/writes on the socket connection between the Apache child processes and the mod_wsgi daemon processes. If this is not specified, the number of seconds specified by the Apache `Timeout`_ directive will be used instead. .. _queue-timeout: **queue-timeout=sss** Defines the timeout on how long to wait for a mod_wsgi daemon process to accept a request for processing. This option is to allow one to control what to do when backlogging of requests occurs. If the daemon process is overloaded and getting behind, then it is more than likely that a user will have given up on the request anyway if they have to wait too long. This option allows you to specify that a request that was queued up waiting for too long is discarded, allowing any transient backlog to be quickly discarded and not simply cause the daemon process to become even more backlogged. When this occurs the user will receive a 504 Gateway Time Out response. .. _listen-backlog: **listen-backlog=nnn** Defines the depth of the daemon process socket listener queue. By default the limit is 100, although this is actually a hint, as different operating systems can have different limits on the maximum value or otherwise treat it in special ways. This option can be set, along with ``queue-timeout``, to try and better handle backlogging when the WSGI application gets overloaded. .. _socket-user: **socket-user=name | socket-user=#uid** Set the owner of the UNIX listener socket for the daemon process group.
This can be used when using the Apache `PrivilegesMode`_ directive with value of ``SECURE`` to change the owner of the socket from the default Apache user, to the user under which the Apache child process which is attempting to connect to the daemon process group, will run when handling requests. This is necessary otherwise the Apache child worker process will not be able to connect to the listener socket for the mod_wsgi daemon process to proxy the request to the WSGI application. This option can also be used when using third party Apache modules such as mod_ruid, mod_ruid2, mod_suid as well as the ITK MPM for Apache. .. _cpu-time: **cpu-time-limit=sss** Define the maximum amount of CPU time a daemon process is allowed to consume before a shutdown is triggered and the daemon process restarted. The point of this is to provide some means of controlling potentially run away processes due to bad code that gets stuck in heavy processing loops. Note that CPU time used is recorded from when the daemon process is first created. This means that a process will eventually reach the limit in normal use and would be restarted. You can use the ``graceful-timeout`` option to reduce the chances that an active request will be interrupted. .. _cpu-priority: **cpu-priority=num** Sets the scheduling priority set to the daemon processes. This can be a number of the range -20 to 20. The default priority is 0. A lower priority gives more favourable scheduling. .. _memory-limit: **memory-limit=num** Sets the maximum amount of memory a daemon process can use. This will have no affect on some platforms as ``RLIMIT_AS``/``RLIMIT_DATA`` with ``setrlimit()`` isn't always implemented. For example MacOS X and older Linux kernel versions do not implement this feature. You will need to test whether this feature works or not before depending on it. .. _virtual-memory-limit: **virtual-memory-limit=num** Sets the maximum amount of virtual memory a daemon process can use. This will have no affect on some platforms as ``RLIMIT_VMEM`` with ``setrlimit()`` isn't always implemented. You will need to test whether this feature works or not before depending on it. .. _stack-size: **stack-size=nnn** The amount of virtual memory in bytes to be allocated for the stack corresponding to each thread created by mod_wsgi in a daemon process. This option would be used when running Linux in a VPS system which has been configured with a quite low 'Memory Limit' in relation to the 'Context RSS' and 'Max RSS Memory' limits. In particular, the default stack size for threads under Linux is 8MB is quite excessive and could for such a VPS result in the 'Memory Limit' being exceeded before the RSS limits were exceeded. In this situation, the stack size should be dropped down to be in the region of 512KB (524288 bytes). .. _receive-buffer-size: **receive-buffer-size=nnn** Defines the UNIX socket buffer size for data being received by the daemon process from the Apache child process. This option may need to be used to override small default values set by certain operating systems and would help avoid possibility of deadlock between Apache child process and daemon process when the WSGI application generates large responses but doesn't consume request content. In general such deadlock problems would not arise with well behaved WSGI applications, but some spam bots attempting to post data to web sites are known to trigger the problem. 
The maximum possible value that can be set for the buffer size is operating system dependent and will need to be calculated through trial and error. .. _send-buffer-size: **send-buffer-size=nnn** Defines the UNIX socket buffer size for data being sent in the direction daemon process back to Apache child process. This option may need to be used to override small default values set by certain operating systems and would help avoid possibility of deadlock between Apache child process and daemon process when the WSGI application generates large responses but doesn't consume request content. In general such deadlock problems would not arise with well behaved WSGI applications, but some spam bots attempting to post data to web sites are known to trigger the problem. The maximum possible value that can be set for the buffer size is operating system dependent and will need to be calculated through trial and error. .. _header-buffer-size: **header-buffer-size=nnn** Defines the maximum size that a response header/value can be that is returned from a WSGI application. The default size is 32768 bytes. This might need to be overridden where excessively large response headers are returned, such as in custom authentication challenge schemes which use the ``WWW-Authenticate`` header. .. _response-buffer-size: **response-buffer-size=nnn** Defines the maximum number of bytes that will be buffered for a response in the Apache child processes when proxying the response body from the WSGI application. The default size is 65536 bytes. Be careful increasing this to provide extra buffering of responses as it contributes to the runtime memory size of the Apache child processes. .. _response-socket-timeout: **response-socket-timeout=nnn** Defines the maximum number of seconds allowed to pass before timing out on a write operation back to the HTTP client when the response buffer has filled and data is being forcibly flushed. Defaults to 0 seconds indicating that it will default to the value of the ``socket-timeout`` option. To delegate a particular WSGI application to run in a named set of daemon processes, the ``WSGIProcessGroup`` directive should be specified in appropriate context for that application, or the ``process-group`` option used on the ``WSGIScriptAlias`` directive. If neither is used to delegate the WSGI application to run in a daemon process group, the application will be run within the standard Apache child processes. If the ``WSGIDaemonProcess`` directive is specified outside of all virtual host containers, any WSGI application can be delegated to be run within that daemon process group. If the ``WSGIDaemonProcess`` directive is specified within a virtual host container, only WSGI applications associated with virtual hosts with the same server name as that virtual host can be delegated to that set of daemon processes. In the case where you have two separate ``VirtualHost`` definitions for the same ``ServerName``, but where one is for port 80 and the other for port 443, specify the ``WSGIDaemonProcess`` directive in the first ``VirtualHost``. You can then refer to that daemon process group by name from the second ``VirtualHost``. Using one daemon process group across the two virtual hosts in this case is preferred as then you do not have two whole separate instances of your application for port 80 and 443. :: ServerName www.site1.com WSGIDaemonProcess www.site1.com user=joe group=joe processes=2 threads=25 WSGIProcessGroup www.site1.com ... ServerName www.site1.com WSGIProcessGroup www.site1.com ... 
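For clarity, the same arrangement with the enclosing virtual host containers shown explicitly might look like the following sketch (the port numbers and user details are illustrative)::

    <VirtualHost *:80>
        ServerName www.site1.com
        WSGIDaemonProcess www.site1.com user=joe group=joe processes=2 threads=25
        WSGIProcessGroup www.site1.com
        ...
    </VirtualHost>

    <VirtualHost *:443>
        ServerName www.site1.com
        WSGIProcessGroup www.site1.com
        ...
    </VirtualHost>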
When ``WSGIDaemonProcess`` is associated with a virtual host, the error log associated with that virtual host will be used for all Apache error log output from mod_wsgi rather than it appear in the main Apache error log. For example, if a server is hosting two virtual hosts and it is desired that the WSGI applications related to each virtual host run in distinct processes of their own and as a user which is the owner of that virtual host, the following could be used:: ServerName www.site1.com CustomLog logs/www.site1.com-access_log common ErrorLog logs/ww.site1.com-error_log WSGIDaemonProcess www.site1.com user=joe group=joe processes=2 threads=25 WSGIProcessGroup www.site1.com ... ServerName www.site2.com CustomLog logs/www.site2.com-access_log common ErrorLog logs/www.site2.com-error_log WSGIDaemonProcess www.site2.com user=bob group=bob processes=2 threads=25 WSGIProcessGroup www.site2.com ... For historical reasons and the inability to change existing behaviour when adding or changing features, many of the options to ``WSGIDaemonProcess``, especially those related to timeouts are not enabled by default. It is strongly recommended you explicitly set these options yourself as this will give you a system which is better able to recover from backlogging due to overloading when you have too many long running requests or hanging requests. As a starting point you can see what ``mod_wsgi-express`` uses as defaults, adjusting them as necessary to suit your specific application after you research what each option does. For example, consider starting out with: * ``display-name='%{GROUP}'`` * ``lang='en_US.UTF-8'`` * ``locale='en_US.UTF-8'`` * ``threads=5`` * ``queue-timeout=45`` * ``socket-timeout=60`` * ``connect-timeout=15`` * ``request-timeout=60`` * ``inactivity-timeout=0`` * ``startup-timeout=15`` * ``deadlock-timeout=60`` * ``graceful-timeout=15`` * ``eviction-timeout=0`` * ``restart-interval=0`` * ``shutdown-timeout=5`` * ``maximum-requests=0`` Note that the ``WSGIDaemonProcess`` directive and corresponding features are not available on Windows. .. _User: http://httpd.apache.org/docs/2.4/mod/mod_unixd.html#user .. _Group: http://httpd.apache.org/docs/2.4/mod/mod_unixd.html#group .. _Timeout: http://httpd.apache.org/docs/2.4/mod/core.html#timeout .. _PrivilegesMode: https://httpd.apache.org/docs/2.4/mod/mod_privileges.html#privilegesmode mod_wsgi-5.0.0/docs/configuration-directives/WSGIImportScript.rst000066400000000000000000000034671452636074700251750ustar00rootroot00000000000000================ WSGIImportScript ================ :Description: Specify a script file to be loaded on process start. :Syntax: ``WSGIImportScript`` *path* ``[`` *options* ``]`` :Context: server config The WSGIImportScript directive can be used to specify a script file to be loaded when a process starts. Options must be provided to indicate the name of the process group and the application group into which the script will be loaded. The options which must supplied to the WSGIImportScript directive are: **process-group=name** Specifies the name of the process group for which the script file will be loaded. The name of the process group can be set to the special value '%{GLOBAL}' which denotes that the script file be loaded for the Apache child processes. Any other value indicates appropriate process group for mod_wsgi daemon mode. **application-group=name** Specifies the name of the application group within the specified process for which the script file will be loaded. 
The name of the application group can be set to the special value '%{GLOBAL}' which denotes that the script file be loaded within the context of the first interpreter created by Python when it is initialised. Otherwise, will be loaded into the interpreter for the specified application group. Because the script files are loaded prior to beginning to accept any requests, any delay in loading the script will not cause actual requests to be blocked. As such, the WSGIImportScript can be used to preload a WSGI application script file on process start so that it is ready when actual user requests arrive. For where there are multiple processes handling requests, this can reduce or eliminate the apparent stalling of an application when performing a restart of Apache or a daemon mode process group. mod_wsgi-5.0.0/docs/configuration-directives/WSGILazyInitialization.rst000066400000000000000000000100041452636074700263460ustar00rootroot00000000000000====================== WSGILazyInitialization ====================== :Description: Enable/disable lazy initialisation of Python. :Syntax: ``WSGILazyInitialization On|Off`` :Default: ``WSGILazyInitialization On`` :Context: server config The WSGILazyInitialization directives sets whether or not the Python interpreter is preinitialised within the Apache parent process or whether lazy initialisation is performed, and the Python interpreter only initialised in the Apache server processes or mod_wsgi daemon processes after they have forked from the Apache parent process. In versions of mod_wsgi prior to version 3.0 the Python interpreter was always preinitialised in the Apache parent process. This did mean that theoretically some benefit in memory usage could be derived from delayed copy on write semantics of memory inherited by child processes that was initialised in the parent. This memory wasn't significant however and was tempered by the fact that the Python interpreter when destroyed and then reinitialised in the Apache parent process on an Apache restart, would with some Python versions leak memory. This meant that if a server had many restarts performed, the Apache parent process and thus all forked child processes could grow in memory usage over time, eventually necessitating Apache be completely stopped and then restarted. This issue of memory leaks with the Python interpreter reached an extreme with Python 3.0, where by design, various data structures would not be destroyed on the basis that it would be reused when Python interpreter was reinitialised within the same process. The problem is that when an Apache restart is performed, mod_wsgi and the Python library are unloaded from memory, with the result that the references to that memory would be lost and so a real memory leak, of significant size and much worse that older versions of Python, would result. As a consequence, with mod_wsgi 3.0 and onwards, the Python interpreter is not initialised by default in the Apache parent process for any version of Python. This avoids completely the risk of cummulative memory leaks by the Python interpreter on a restart into the Apache parent process, albeit with potential for a slight increase in child process memory sizes. If need be, the existing behaviour can be restored by setting the directive with the value 'Off'. A further upside of using lazy initialisation is that if you are using daemon mode only, ie., not using embedded mode, you can completely turn off initialisation of the Python interpreter within the main Apache server child process. 
Unfortunately, because it isn't possible in the general case to know whether embedded mode will be needed or not, you will need to manually set the configuration to do this. This can be done by setting:: WSGIRestrictEmbedded On With restrictions on embedded mode enabled, any attempt to run a WSGI application in embedded mode will fail, so it will be necessary to ensure all WSGI applications are delegated to run in daemon mode. Although WSGI applications will be restricted from being run in embedded mode and the Python interpreter therefore not initialised, it will fallback to being initialised if you use any of the Python hooks for access control, authentication or authorisation providers, or WSGI application dispatch overrides. Note that if mod_python is being used in the same Apache installation, because mod_python takes precedence over mod_wsgi in initialising the Python interpreter, lazy initialisation cannot be done and so Python interpreter will continue to be preinitialised in the Apache parent process regardless of the setting of WSGILazyInitialization. Use of mod_python will thus perpetuate the risk of memory leaks and growing memory use of Apache process. This is especially the case since mod_python doesn't even properly destroy the Python interpreter in the Apache parent process on a restart and so all memory associated with the Python interpreter is leaked and not just that caused by the Python interpreter when it is destroyed and doesn't clean up after itself. mod_wsgi-5.0.0/docs/configuration-directives/WSGIPassAuthorization.rst000066400000000000000000000022231452636074700262120ustar00rootroot00000000000000===================== WSGIPassAuthorization ===================== :Description: Enable/Disable passing of authorisation headers. :Syntax: ``WSGIPassAuthorization On|Off`` :Default: ``WSGIPassAuthorization Off`` :Context: server config, virtual host, directory, .htaccess The WSGIPassAuthorization directive can be used to control whether HTTP authorisation headers are passed through to a WSGI application in the ``HTTP_AUTHORIZATION`` variable of the WSGI application environment when the equivalent HTTP request headers are present. This option would need to be set to ``On`` if the WSGI application was to handle authorisation rather than Apache doing it. Authorisation headers are not passed through by default as doing so could leak information about passwords through to a WSGI application which should not be able to see them when Apache is performing authorisation. If Apache is performing authorisation, a WSGI application can still find out what type of authorisation scheme was used by checking the variable ``AUTH_TYPE`` of the WSGI application environment. The login name of the authorised user can be determined by checking the variable ``REMOTE_USER``. mod_wsgi-5.0.0/docs/configuration-directives/WSGIProcessGroup.rst000066400000000000000000000057061452636074700251670ustar00rootroot00000000000000================ WSGIProcessGroup ================ :Description: Sets which process group WSGI application is assigned to. :Syntax: ``WSGIProcessGroup %{GLOBAL}|%{ENV:variable}|name`` :Default: ``WSGIProcessGroup %{GLOBAL}`` :Context: server config, virtual host, directory The WSGIProcessGroup directive can be used to specify which process group a WSGI application or set of WSGI applications will be executed in. All WSGI applications within the same process group will execute within the context of the same group of daemon processes. 
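For example, to have the WSGI applications for a site handled by a single named daemon process group, a configuration sketch along the following lines could be used (the group name, paths and settings are illustrative)::

    WSGIDaemonProcess www.example.com processes=2 threads=15
    WSGIScriptAlias /app /var/www/app/app.wsgi

    <Directory /var/www/app>
        WSGIProcessGroup www.example.com
    </Directory>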
The argument to the WSGIProcessGroup can be either one of two special expanding variables or the actual name of a group of daemon processes setup using the WSGIDaemonProcess directive. The meaning of the special variables are: **%{GLOBAL}** The process group name will be set to the empty string. Any WSGI applications in the global process group will always be executed within the context of the standard Apache child processes. Such WSGI applications will incur the least runtime overhead, however, they will share the same process space with other Apache modules such as PHP, as well as the process being used to serve up static file content. Running WSGI applications within the standard Apache child processes will also mean the application will run as the user that Apache would normally run as. **%{ENV:variable}** The process group name will be set to the value of the named environment variable. The environment variable is looked-up via the internal Apache notes and subprocess environment data structures and (if not found there) via getenv() from the Apache server process. The result must identify a named process group setup using the WSGIDaemonProcess directive. In an Apache configuration file, environment variables accessible using the ``%{ENV}`` variable reference can be setup by using directives such as `SetEnv`_ and `RewriteRule`_. For example, to select which process group a specific WSGI application should execute within based on entries in a database file, the following could be used:: RewriteEngine On RewriteMap wsgiprocmap dbm:/etc/httpd/wsgiprocmap.dbm RewriteRule . - [E=PROCESS_GROUP:${wsgiprocmap:%{REQUEST_URI}}] WSGIProcessGroup %{ENV:PROCESS_GROUP} When using the WSGIProcessGroup directive, only daemon process groups defined within virtual hosts with the same server name, or those defined at global scope outside of any virtual hosts can be selected. It is not possible to select a daemon process group which is defined within a different virtual host. Which daemon process groups can be selected may be further restricted if the WSGIRestrictProcess directive has been used. Note that the WSGIProcessGroup directive and corresponding features are not available on Windows or when running Apache 1.3. .. _SetEnv: http://httpd.apache.org/docs/2.2/mod/mod_env.html#setenv .. _RewriteRule: http://httpd.apache.org/docs/2.2/mod/mod_rewrite.html#rewriterule mod_wsgi-5.0.0/docs/configuration-directives/WSGIPythonEggs.rst000066400000000000000000000013041452636074700246110ustar00rootroot00000000000000============== WSGIPythonEggs ============== :Description: Directory to use for Python eggs cache. :Syntax: ``WSGIPythonEggs`` *directory* :Context: server config Used to specify the directory to be used as the Python eggs cache directory for all sub interpreters created within embedded mode. This directive achieves the same affect as having set the ``PYTHON_EGG_CACHE`` environment variable. Note that the directory specified must exist and be writable by the user that the Apache child processes run as. The directive only applies to mod_wsgi embedded mode. To set the Python eggs cache directory for mod_wsgi daemon processes, use the 'python-eggs' option to the WSGIDaemonProcess directive instead. mod_wsgi-5.0.0/docs/configuration-directives/WSGIPythonHome.rst000066400000000000000000000045601452636074700246230ustar00rootroot00000000000000============== WSGIPythonHome ============== :Description: Absolute path to Python prefix/exec_prefix directories. 
:Syntax: ``WSGIPythonHome`` *prefix|prefix:exec_prefix* :Context: server config Used to indicate to Python when it is initialised where its library files are installed. This should be defined where the Python executable is not in the ``PATH`` of the user that Apache runs as, or where a system has multiple versions of Python installed in different locations in the file system, especially different installations of the same major/minor version, and the installation that Apache finds in its ``PATH`` is not the desired one. This directive can also be used to indicate a Python virtual environment created using a tool such as ``virtualenv``, to be used for the whole of mod_wsgi. When this directive is used it should be supplied the prefix for the directories containing the platform independent and system dependent Python library files. The directories should be separated by a ':'. If the same directory is used for both, then only the one directory path needs to be supplied. Where the directories are the same, this can usually be determined by looking at the value of the ``sys.prefix`` variable for the version of Python being used. Note that the Python installation being referred to using this directive must be the same major/minor version of Python that mod_wsgi was compiled for. If you want to use a different version of major/minor version of Python than currently used, you must recompile mod_wsgi against the alternate version of Python. This directive is the same as having set the environment variable ``PYTHONHOME`` in the environment of the user that Apache executes as. If this directive is used it will override any setting of ``PYTHONHOME`` in the environment of the user that Apache executes as. This directive will have no affect if mod_python is being loaded into Apache at the same time as mod_wsgi as mod_python will in that case be responsible for initialising Python. This directive is not available on Windows systems. Note that mod_wsgi 1.X will not actually reject this directive if listed in the configuration, however, it also will not do anything either. This is because on Windows systems Python ignores the ``PYTHONHOME`` environment variable and always seems to use the location of the Python DLL for determining where the library files are located. mod_wsgi-5.0.0/docs/configuration-directives/WSGIPythonOptimize.rst000066400000000000000000000045141452636074700255320ustar00rootroot00000000000000================== WSGIPythonOptimize ================== :Description: Enables basic Python optimisation features. :Syntax: ``WSGIPythonOptimize [0|1|2]`` :Default: ``WSGIPythonOptimize 0`` :Context: server config Sets the level of Python compiler optimisations. The default is '0' which means no optimisations are applied. Setting the optimisation level to '1' or above will have the effect of enabling basic Python optimisations and changes the filename extension for compiled (bytecode) files from ``.pyc`` to ``.pyo``. On the Windows platform, optimisation level of '0' apparently results in the same outcome as if the optimisation level had been set to '1'. When the optimisation level is set to '2', doc strings will not be generated and thus not retained. This may techically result in a smaller memory footprint if all ``.pyo`` files were compiled at this optimisation level, but may cause some Python packages which interrogate doc strings in some way to fail. 
Since all the installed ``.pyo`` files in your Python installation are not likely to be installed with level '2' optimisation, the gain from using this level of optimisation will probably be negligible, if any. This is because potentially only the Python code for your own application will be compiled with this level of optimisation. This will be the case as the ``.pyo`` files will already exist for modules in the standard Python library and they will be used as is, rather than being regenerated with a higher level of optimisation. Use of level '2' optimisation is therefore discouraged.

This directive will have no effect if mod_python is being loaded into Apache at the same time as mod_wsgi, as mod_python will in that case be responsible for initialising Python.

Overall, if you do not understand what the normal 'python' executable ``-O`` option does, how the Python runtime changes its behaviour as a result, and you don't know exactly how your application would be affected by enabling this option, then do not use this option. In other words, stop trying to prematurely optimise the performance of your application through shortcuts. You will get much better performance gains by looking at the design of your application and eliminating bottlenecks within it and how it uses any database. So, put the gun down and back away, it will be better for all concerned.

mod_wsgi-5.0.0/docs/configuration-directives/WSGIPythonPath.rst000066400000000000000000000057341452636074700246310ustar00rootroot00000000000000
==============
WSGIPythonPath
==============

:Description: Additional directories to search for Python modules.
:Syntax: ``WSGIPythonPath`` *directory|directory-1:directory-2:...*
:Context: server config

Used to specify additional directories to search for Python modules. If multiple directories are specified they should be separated by a ':' if using a UNIX-like system, or ';' if using Windows. If any part of a directory path contains a space character, the complete argument string to WSGIPythonPath must be quoted.

When using mod_wsgi version 1.X, this directive is the same as having set the environment variable ``PYTHONPATH`` in the environment of the user that Apache executes as. If this directive is used it will override any setting of ``PYTHONPATH`` in the environment of the user that Apache executes as. The end result is that the listed directories will be added to ``sys.path``.

Note that in mod_wsgi version 1.X this applies to all Python sub interpreters created, be they in the Apache child processes when embedded mode is used, or in distinct daemon processes when daemon mode is used. It is not possible to define this differently for mod_wsgi daemon processes. If additional directories need to be added to the module search path for a specific WSGI application it should be done within the WSGI application script itself.

When using mod_wsgi version 2.0, this directive does not have the same effect as having set the environment variable ``PYTHONPATH``. In fact, if ``PYTHONPATH`` is set in the environment of the user that Apache is started as, any directories so defined will still be added to ``sys.path`` and they will not be overridden. The difference with this directive when using mod_wsgi 2.0 is that each directory listed will be added to the end of ``sys.path`` by calling ``site.addsitedir()``. By using this function, as well as the directory being added to ``sys.path``, any '.pth' files located in the directories will be opened and processed.
Thus, if the directories contain Python eggs, any associated directories corresponding to those Python eggs will in turn also be added automatically to ``sys.path``.

Note however that when using mod_wsgi 2.0, this directive only sets up the additional Python module search directories for interpreters created in the Apache child processes where embedded mode is used. If directories need to be specified for interpreters running in daemon processes, the 'python-path' option to the WSGIDaemonProcess directive corresponding to that daemon process should instead be used.

In mod_wsgi version 2.0, because directories corresponding to Python eggs are automatically added to ``sys.path``, the directive can be used to point at the ``site-packages`` directory corresponding to a Python virtual environment created by a tool such as ``virtualenv``.

For mod_wsgi 1.X, this directive will have no effect if mod_python is being loaded into Apache at the same time as mod_wsgi, as mod_python will in that case be responsible for initialising Python.

mod_wsgi-5.0.0/docs/configuration-directives/WSGIRestrictEmbedded.rst000066400000000000000000000017401452636074700257370ustar00rootroot00000000000000
====================
WSGIRestrictEmbedded
====================

:Description: Enable restrictions on use of embedded mode.
:Syntax: ``WSGIRestrictEmbedded On|Off``
:Default: ``WSGIRestrictEmbedded Off``
:Context: server config

The WSGIRestrictEmbedded directive determines whether mod_wsgi embedded mode is enabled or not. If set to 'On' and the restriction on embedded mode is therefore enabled, any attempt to make a request against a WSGI application which hasn't been properly configured so as to be delegated to a daemon mode process will fail with an HTTP internal server error response.

For historical reasons and to maintain backward compatibility with old configurations this option is 'Off' by default. As daemon mode is the preferred deployment method, it is good practice to override the default and set this to 'On', ensuring you have set up and are always using daemon mode.

This option does not exist on Windows or any other configuration where daemon mode is not available.

mod_wsgi-5.0.0/docs/configuration-directives/WSGIRestrictProcess.rst000066400000000000000000000042201452636074700256600ustar00rootroot00000000000000
===================
WSGIRestrictProcess
===================

:Description: Restrict which daemon process groups can be selected.
:Syntax: ``WSGIRestrictProcess`` *group-1 group-2 ...*
:Context: server config, virtual host, directory

When using the WSGIProcessGroup directive, only daemon process groups defined within virtual hosts with the same server name, or those defined at global scope outside of any virtual hosts, can be selected. It is not possible to select a daemon process group which is defined within a different virtual host.

To further limit which of the available daemon process groups can be selected, the WSGIRestrictProcess directive can be used to list a restricted set of daemon process group names. This could be used for example where %{ENV} substitution is being used to allow the daemon process group to be selected from a .htaccess file for a specific user.
The main Apache configuration for this scenario might be::

    WSGIDaemonProcess default processes=2 threads=25

    <VirtualHost *:80>
    ServerName www.site.com

    WSGIDaemonProcess bob:1 user=bob group=bob threads=25
    WSGIDaemonProcess bob:2 user=bob group=bob threads=25
    WSGIDaemonProcess bob:3 user=bob group=bob threads=25

    WSGIDaemonProcess joe:1 user=joe group=joe threads=25
    WSGIDaemonProcess joe:2 user=joe group=joe threads=25
    WSGIDaemonProcess joe:3 user=joe group=joe threads=25

    SetEnv PROCESS_GROUP default
    WSGIProcessGroup %{ENV:PROCESS_GROUP}

    <Directory /home/*/public_html/wsgi-scripts>
    Options ExecCGI
    AllowOverride FileInfo
    AddHandler wsgi-script .wsgi
    </Directory>

    <Directory /home/bob/public_html/wsgi-scripts>
    WSGIRestrictProcess bob:1 bob:2 bob:3
    SetEnv PROCESS_GROUP bob:1
    </Directory>
    </VirtualHost>

The .htaccess file within the user's account could then delegate specific WSGI applications to different daemon process groups using the `SetEnv`_ directive::

    <Files blog.wsgi>
    SetEnv PROCESS_GROUP bob:2
    </Files>

    <Files wiki.wsgi>
    SetEnv PROCESS_GROUP bob:3
    </Files>

Note that the WSGIDaemonProcess directive and corresponding features are not available on Windows or when running Apache 1.3.

.. _SetEnv: http://httpd.apache.org/docs/2.2/mod/mod_env.html#setenv

mod_wsgi-5.0.0/docs/configuration-directives/WSGIRestrictSignal.rst000066400000000000000000000044551452636074700254670ustar00rootroot00000000000000
==================
WSGIRestrictSignal
==================

:Description: Enable restrictions on use of signal().
:Syntax: ``WSGIRestrictSignal On|Off``
:Default: ``WSGIRestrictSignal On``
:Context: server config

A well behaved Python WSGI application should not in general register any signal handlers of its own using ``signal.signal()``. The reason for this is that the web server which is hosting a WSGI application will more than likely register signal handlers of its own. If a WSGI application were to override such signal handlers it could interfere with the operation of the web server, preventing actions such as server shutdown and restart.

In the interests of promoting portability of WSGI applications, mod_wsgi restricts use of ``signal.signal()`` and will ensure that any attempts to register signal handlers are ignored. A warning notice will be output to the Apache error log indicating that this action has been taken.

If for some reason there is a need for a WSGI application to register some special signal handler this behaviour can be turned off, however an application should avoid the signals ``SIGTERM``, ``SIGINT``, ``SIGHUP``, ``SIGWINCH`` and ``SIGUSR1`` as these are all used by Apache.

Apache will ensure that the signal ``SIGPIPE`` is set to ``SIG_IGN``. If a WSGI application needs to override this, it must ensure that it is reset to ``SIG_IGN`` before any Apache code is run. In a multi threaded MPM this would be practically impossible to ensure, so it is preferable that the handler for ``SIGPIPE`` also not be changed. Apache does not use ``SIGALRM``, but it is generally preferable that other techniques be used to achieve the same effect.

Do note that if enabling the ability to register signal handlers, such a registration can only reliably be done from within code which is implemented as a side effect of importing a script file identified by the WSGIImportScript directive. This is because signal handlers can only be registered from the main Python interpreter thread, and request handlers when using embedded mode and a multithreaded Apache MPM would generally execute from secondary threads. Similarly, when using daemon mode, request handlers would execute from secondary threads. Only code run as a side effect of WSGIImportScript is guaranteed to be executed in the main Python interpreter thread.
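As an illustrative sketch only, a configuration which turns off the restriction and arranges for any signal handler registration to occur as a side effect of a script imported into the main interpreter of a daemon process at startup might look like the following. The daemon process group name and script path shown here are hypothetical and are not taken from the examples above::

    # The group name 'example.com' and the path to 'signals.wsgi' are
    # placeholders only; adjust them to suit your own installation.
    WSGIRestrictSignal Off

    WSGIDaemonProcess example.com processes=2 threads=25
    WSGIImportScript /var/www/example.com/signals.wsgi process-group=example.com application-group=%{GLOBAL}

Any call to ``signal.signal()`` made at module import time in ``signals.wsgi`` would then run in the main Python interpreter thread of each daemon process, which as noted above is the only place such registration can reliably be done.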
mod_wsgi-5.0.0/docs/configuration-directives/WSGIRestrictStdin.rst000066400000000000000000000020301452636074700253200ustar00rootroot00000000000000================= WSGIRestrictStdin ================= :Description: Enable restrictions on use of STDIN. :Syntax: ``WSGIRestrictStdin On|Off`` :Default: ``WSGIRestrictStdin On`` :Context: server config A well behaved Python WSGI application should never attempt to read any input directly from ``sys.stdin``. This is because ways of hosting WSGI applications such as CGI use standard input as the mechanism for receiving the content of a request from the web server. If a WSGI application were to directly read from ``sys.stdin`` it could interfere with the operation of the WSGI adapter and result in corruption of the input stream. In the interests of promoting portability of WSGI applications, mod_wsgi restricts access to ``sys.stdin`` and will raise an exception if an attempt is made to use ``sys.stdin`` explicitly. The only time that one might want to remove this restriction is if the Apache web server is being run in debug or single process mode for the purposes of being able to run an interactive Python debugger such as ``pdb``. mod_wsgi-5.0.0/docs/configuration-directives/WSGIRestrictStdout.rst000066400000000000000000000025211452636074700255260ustar00rootroot00000000000000================== WSGIRestrictStdout ================== :Description: Enable restrictions on use of STDOUT. :Syntax: ``WSGIRestrictStdout On|Off`` :Default: ``WSGIRestrictStdout On`` :Context: server config A well behaved Python WSGI application should never attempt to write any data directly to ``sys.stdout`` or use the ``print`` statement without directing it to an alternate file object. This is because ways of hosting WSGI applications such as CGI use standard output as the mechanism for sending the content of a response back to the web server. If a WSGI application were to directly write to ``sys.stdout`` it could interfere with the operation of the WSGI adapter and result in corruption of the output stream. In the interests of promoting portability of WSGI applications, mod_wsgi restricts access to ``sys.stdout`` and will raise an exception if an attempt is made to use ``sys.stdout`` explicitly. The only time that one might want to remove this restriction is purely out of convencience of being able to use the ``print`` statement during debugging of an application, or if some third party module or WSGI application was errornously using ``print`` when it shouldn't. If restrictions on using ``sys.stdout`` are removed, any data written to it will instead be sent through to ``sys.stderr`` and will appear in the Apache error log file. mod_wsgi-5.0.0/docs/configuration-directives/WSGIScriptAlias.rst000066400000000000000000000112361452636074700247450ustar00rootroot00000000000000=============== WSGIScriptAlias =============== :Description: Maps a URL to a filesystem location and designates the target as a WSGI script. :Syntax: ``WSGIScriptAlias`` *URL-path file-path|directory-path* ``[`` *options* ``]`` :Context: server config, virtual host The WSGIScriptAlias directive behaves in the same manner as the `Alias`_ directive, except that it additionally marks the target directory as containing WSGI scripts, or marks the specific *file-path* as a script, that should be processed by mod_wsgi's ``wsgi-script`` handler. 
Where the target is a *directory-path*, URLs with a case-sensitive (%-decoded) path beginning with *URL-path* will be mapped to scripts contained in the indicated directory. For example::

    WSGIScriptAlias /wsgi-scripts/ /web/wsgi-scripts/

A request for ``http://www.example.com/wsgi-scripts/name`` in this case would cause the server to run the WSGI application defined in ``/web/wsgi-scripts/name``. This configuration is essentially equivalent to::

    Alias /wsgi-scripts/ /web/wsgi-scripts/

    <Directory /web/wsgi-scripts>
    SetHandler wsgi-script
    Options +ExecCGI
    </Directory>

Where the target is a *file-path*, URLs with a case-sensitive (%-decoded) path beginning with *URL-path* will be mapped to the script defined by the *file-path*. For example::

    WSGIScriptAlias /name /web/wsgi-scripts/name

A request for ``http://www.example.com/name`` in this case would cause the server to run the WSGI application defined in ``/web/wsgi-scripts/name``.

If possible you should avoid placing WSGI scripts under the `DocumentRoot`_ in order to avoid accidentally revealing their source code if the configuration is ever changed. The WSGIScriptAlias directive makes this easy by mapping a URL and designating the location of any WSGI scripts at the same time.

If you do choose to place your WSGI scripts in a directory already accessible to clients, do not use WSGIScriptAlias. Instead, use `Directory`_, `SetHandler`_ and `Options`_ as in::

    <Directory /web/wsgi-scripts>
    SetHandler wsgi-script
    Options ExecCGI
    </Directory>

This is necessary since multiple *URL-paths* can map to the same filesystem location, potentially bypassing the WSGIScriptAlias and revealing the source code of the WSGI scripts if they are not restricted by a `Directory`_ section.

Options which can be supplied to the ``WSGIScriptAlias`` directive are:

**process-group=name**

Defines which process group the WSGI application will be executed in. All WSGI applications within the same process group will execute within the context of the same group of daemon processes.

If the name is set to be ``%{GLOBAL}`` the process group name will be set to the empty string. Any WSGI applications in the global process group will always be executed within the context of the standard Apache child processes. Such WSGI applications will incur the least runtime overhead, however, they will share the same process space with other Apache modules such as PHP, as well as the process being used to serve up static file content.

Running WSGI applications within the standard Apache child processes will also mean the application will run as the user that Apache would normally run as.

**application-group=name**

Defines which application group a WSGI application or set of WSGI applications belongs to. All WSGI applications within the same application group will execute within the context of the same Python sub interpreter of the process handling the request.

If the name is set to be ``%{GLOBAL}`` the application group will be set to the empty string. Any WSGI applications in the global application group will always be executed within the context of the first interpreter created by Python when it is initialised, of the process handling the request. Forcing a WSGI application to run within the first interpreter can be necessary when a third party C extension module for Python has used the simplified threading API for manipulation of the Python GIL and thus will not run correctly within any additional sub interpreters created by Python.
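As a minimal sketch of how these two options can be used together, a WSGI application could be delegated to a dedicated daemon process group and the main Python interpreter as shown below. The daemon process group name and file system paths here are hypothetical placeholders, not values prescribed by mod_wsgi::

    # 'example.com' and the paths shown are placeholders only.
    WSGIDaemonProcess example.com processes=2 threads=15
    WSGIScriptAlias / /web/wsgi-scripts/app.wsgi process-group=example.com application-group=%{GLOBAL}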
If both ``process-group`` and ``application-group`` options are set, the WSGI script file will be pre-loaded when the process it is to run in is started, rather than being lazily loaded on the first request.

.. _Alias: http://httpd.apache.org/docs/2.2/mod/mod_alias.html#alias
.. _DocumentRoot: http://httpd.apache.org/docs/2.2/mod/core.html#documentroot
.. _Directory: http://httpd.apache.org/docs/2.2/mod/core.html#directory
.. _SetHandler: http://httpd.apache.org/docs/2.2/mod/core.html#sethandler
.. _Options: http://httpd.apache.org/docs/2.2/mod/core.html#options

mod_wsgi-5.0.0/docs/configuration-directives/WSGIScriptAliasMatch.rst000066400000000000000000000102331452636074700257160ustar00rootroot00000000000000
====================
WSGIScriptAliasMatch
====================

:Description: Maps a URL to a filesystem location and designates the target as a WSGI script.
:Syntax: ``WSGIScriptAliasMatch`` *regex file-path|directory-path* ``[`` *options* ``]``
:Context: server config, virtual host

This directive is similar to the WSGIScriptAlias directive, but makes use of regular expressions, instead of simple prefix matching. The supplied regular expression is matched against the URL-path, and if it matches, the server will substitute any parenthesized matches into the given string and use it as a filename. For example, to map a URL to scripts contained within a directory where the script files use the ``.wsgi`` extension, but it is desired that the extension not appear in the URL, use::

    WSGIScriptAliasMatch ^/wsgi-scripts/([^/]+) /web/wsgi-scripts/$1.wsgi

Note that you should only use WSGIScriptAliasMatch if you know what you are doing. In most cases you should be using WSGIScriptAlias instead. If you use WSGIScriptAliasMatch and don't do things the correct way, then you risk modifying the value of SCRIPT_NAME as passed to the WSGI application, and this can stuff things up badly, causing URL mapping to not work correctly within the WSGI application or stuffing up reconstruction of the full URL when doing redirects. This is because the substitution of the matched sub pattern from the left hand side back into the right hand side is often critical.

If you are using WSGIScriptAliasMatch to pass requests through to a WSGI handler and you need to preserve the path, you can do the following::

    WSGIScriptAlias /api /var/www/mysite.com/apache/django.wsgi/api

A more complicated example::

    WSGIScriptAliasMatch "^/(admin|files|photologue)" /projects/Media/wsgi_handler.py/$1

This will keep the URL match from being stripped off the URL by the time it reaches the WSGI application.

If you think you need to use WSGIScriptAliasMatch, you probably don't really. If you really think you need it, then check on the mod_wsgi mailing list about how to use it properly.

Options which can be supplied to the ``WSGIScriptAliasMatch`` directive are:

**process-group=name**

Defines which process group the WSGI application will be executed in. All WSGI applications within the same process group will execute within the context of the same group of daemon processes.

If the name is set to be ``%{GLOBAL}`` the process group name will be set to the empty string. Any WSGI applications in the global process group will always be executed within the context of the standard Apache child processes. Such WSGI applications will incur the least runtime overhead, however, they will share the same process space with other Apache modules such as PHP, as well as the process being used to serve up static file content.
Running WSGI applications within the standard Apache child processes will also mean the application will run as the user that Apache would normally run as. **application-group=name** Defines which application group a WSGI application or set of WSGI applications belongs to. All WSGI applications within the same application group will execute within the context of the same Python sub interpreter of the process handling the request. If the name is set to be ``%{GLOBAL}`` the application group will be set to the empty string. Any WSGI applications in the global application group will always be executed within the context of the first interpreter created by Python when it is initialised, of the process handling the request. Forcing a WSGI application to run within the first interpreter can be necessary when a third party C extension module for Python has used the simplified threading API for manipulation of the Python GIL and thus will not run correctly within any additional sub interpreters created by Python. If both ``process-group`` and ``application-group`` options are set, and the WSGI script file doesn't include substiutions values to be supplied from the matched URL pattern, the WSGI script file will be pre-loaded when the process it is to run in is started, rather than being lazily loaded on the first request. mod_wsgi-5.0.0/docs/configuration-directives/WSGIScriptReloading.rst000066400000000000000000000011001452636074700256050ustar00rootroot00000000000000=================== WSGIScriptReloading =================== :Description: Enable/Disable detection of WSGI script file changes. :Syntax: ``WSGIScriptReloading On|Off`` :Default: ``WSGIScriptReloading On`` :Context: server config, virtual host, directory, .htaccess :Override: ``FileInfo`` The WSGIScriptReloading directive can be used to control whether changes to WSGI script files trigger the reloading mechanism. By default script reloading is enabled and a change to the WSGI script file will trigger whichever reloading mechanism is appropriate to the mode being used. mod_wsgi-5.0.0/docs/configuration-directives/WSGISocketPrefix.rst000066400000000000000000000036471452636074700251440ustar00rootroot00000000000000================ WSGISocketPrefix ================ :Description: Configure directory to use for daemon sockets. :Syntax: ``WSGISocketPrefix`` *prefix* :Context: server config Defines the directory and name prefix to be used for the UNIX domain sockets used by mod_wsgi to communicate between the Apache child processes and the daemon processes. If the directive is not defined, the sockets and any related mutex lock files will be placed in the standard Apache runtime directory. This is the same directory that the Apache log files would normally be placed. For some Linux distributions, restrictive permissions are placed on the standard Apache runtime directory such that the directory is not readable to others. This can cause problems with mod_wsgi because the user that the Apache child processes run as will subsequently not have the required permissions to access the directory to be able to connect to the sockets. When this occurs, a '503 Service Temporarily Unavailable' error response would be received by the client. To resolve the problem, the WSGISocketPrefix directive should be defined to point at an alternate location. The value may be a location relative to the Apache root directory, or an absolute path. 
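For example, an absolute path could be used as shown in this sketch; the specific directory is only an illustration and, as noted below, it must be writable only by 'root', or by the user Apache is started as if Apache is not started as 'root'::

    # '/var/run/wsgi' is an example location only.
    WSGISocketPrefix /var/run/wsgi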
On systems which restrict access to the standard Apache runtime directory, they normally provide an alternate directory for placing sockets and lock files used by Apache modules. This directory is usually called 'run' and to make use of this directory the WSGISocketPrefix directive would be set as follows:: WSGISocketPrefix run/wsgi Note, do not put the sockets in the system temporary working directory. That is, do not go making the prefix '/tmp/wsgi'. The directory should be one that is only writable by 'root' user, or if not starting Apache as 'root', the user that Apache is started as. Note that the WSGISocketPrefix directive and corresponding features are not available on Windows or when running Apache 1.3. mod_wsgi-5.0.0/docs/configuration-directives/WSGITrustedProxies.rst000066400000000000000000000010551452636074700255310ustar00rootroot00000000000000================== WSGITrustedProxies ================== :Description: Specify a list of trusted proxies. :Syntax: ``WSGITrustedProxies`` *ipaddr|(ipaddr-1 ipaddr-2 ...)* :Context: server config, virtual host, directory, .htaccess :Override: ``FileInfo`` Used to specify a list of IP addresses for proxies placed in front of the Apache instance which are trusted. This directive only has effect when used in conjunction with the ``WSGITrustedProxyHeaders`` directive. For more details see the documentation for the ``WSGITrustedProxyHeaders`` directive. mod_wsgi-5.0.0/docs/configuration-directives/WSGITrustedProxyHeaders.rst000066400000000000000000000236551452636074700265270ustar00rootroot00000000000000======================= WSGITrustedProxyHeaders ======================= :Description: Specify a list of trusted proxy headers. :Syntax: ``WSGITrustedProxyHeaders`` *header|(header-1 header-2 ...)* :Context: server config, virtual host, directory, .htaccess :Override: ``FileInfo`` When trusted proxies are designated, this is used to specify the headers which are used to convey information from a proxy to a web server behind the proxy that are to be trusted. The IP addresses of the proxies to be trusted should be specified using the ``WSGITrustedProxies`` directive. As there are multiple conventions for what headers are used to convey information from the proxy to the web server you need to specify the specific header from a supported list of headers for a particular purpose that you want to trust using the ``WSGITrustedProxyHeaders`` directive. When a request is then received from a trusted proxy, only the header from the set of headers for that particular purpose is passed through to the WSGI application and all others will be dropped. If a request was instead from an IP address which isn't a trusted proxy, then all headers in that set of headers will be dropped and not passed through. Depending on the purpose of the header, modifications will be made to other special variables passed through to the WSGI application. It is these other variables which is what the WSGI application should consult and the original header should never be consulted, with it only being provided as an indication of which header was used to set the special variable. The different sets of supported headers used by proxies are as follows. For passing through the IP address of the remote HTTP client the supported headers are: * X-Forwarded-For * X-Client-IP * X-Real-IP You should select only one of these headers as the authoritative source for the IP address of the remote HTTP client as sent by the proxy. 
Never select multiple headers because if you do which will be used is indeterminate. The de-facto standard for this type of header is ``X-Forwarded-For`` and it is recommended that it be used if your proxy supports it. The configuration might therefore be:: WSGITrustedProxies 1.2.3.4 WSGITrustedProxyHeaders X-Forwarded-For With this configuration, when a request is received from the trusted proxy only the ``X-Forwarded-For`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_FORWARDED_FOR`` variable. For this set of headers, the ``REMOTE_ADDR`` CGI variable as used by WSGI will be modified and set to the IP address of the remote HTTP client. A WSGI application in this case should always use ``REMOTE_ADDR`` and never consult the original header files. For passing through the protocol of the original request received by the trusted proxy the supported headers are: * X-Forwarded-HTTPS * X-Forwarded-Proto * X-Forwarded-Scheme * X-Forwarded-SSL * X-HTTPS * X-Scheme You should select only one of these headers as the authoritative source for what protocol was used by the remote HTTP client as sent by the proxy. Never select multiple headers because if you do which will be used is indeterminate. The de-facto standard for this type of header is ``X-Forwarded-Proto`` and it is recommended that it be used if your proxy supports it. The configuration might therefore be:: WSGITrustedProxies 1.2.3.4 WSGITrustedProxyHeaders X-Forwarded-Proto With this configuration, when a request is received from the trusted proxy only the ``X-Forwarded-Proto`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_FORWARDED_PROTO`` variable. For this set of headers, the ``wsgi.url_scheme`` variable passed to the WSGI application will be modified to indicate whether the original request used the ``https`` protocol. Note that although it is a convention when using CGI scripts with Apache, the mod_wsgi module removes the ``HTTPS`` variable from the set of variables passed to the WSGI application. You should always use the ``wsgi.url_scheme`` variable in a WSGI application. For passing through the host name targeted by the original request received by the trusted proxy the supported headers are: * X-Forwarded-Host * X-Host You should select only one of these headers as the authoritative source for the host targeted by the original request as sent by the proxy. Never select multiple headers because if you do which will be used is indeterminate. The de-facto standard for this type of header is ``X-Forwarded-Host`` and it is recommended that it be used if your proxy supports it. The configuration might therefore be:: WSGITrustedProxies 1.2.3.4 WSGITrustedProxyHeaders X-Forwarded-Host With this configuration, when a request is received from the trusted proxy only the ``X-Forwarded-Host`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_FORWARDED_HOST`` variable. For this set of headers, the ``HTTP_HOST`` variable passed to the WSGI application will be overridden with the value from the header supplied by the proxy. That is, the value from the proxy for the original request will even override any explicit ``Host`` header supplied in the request from the proxy, which in normal cases would be the host of the web server. 
A WSGI application should always consult the ``HTTP_HOST`` variable and not the separate header supplied by the proxy.

For passing through the port targeted by the original request received by the trusted proxy, the only supported header is:

* X-Forwarded-Port

Although it is the only supported header, you still must select it as a trusted header to have it processed in the same way as other trusted headers. The configuration might therefore be::

    WSGITrustedProxies 1.2.3.4
    WSGITrustedProxyHeaders X-Forwarded-Port

With this configuration, when a request is received from the trusted proxy only the ``X-Forwarded-Port`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_FORWARDED_PORT`` variable.

For this header, the ``SERVER_PORT`` variable passed to the WSGI application will be overridden with the value from the header supplied by the proxy. A WSGI application should always consult the ``SERVER_PORT`` variable and not the separate header supplied by the proxy.

For passing through the host name of any proxy, to use in overriding the host name of the web server, the only supported header is:

* X-Forwarded-Server

Although it is the only supported header, you still must select it as a trusted header to have it processed in the same way as other trusted headers. The configuration might therefore be::

    WSGITrustedProxies 1.2.3.4
    WSGITrustedProxyHeaders X-Forwarded-Server

With this configuration, when a request is received from the trusted proxy only the ``X-Forwarded-Server`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_FORWARDED_SERVER`` variable.

For this header, the ``SERVER_NAME`` variable passed to the WSGI application will be overridden with the value from the header supplied by the proxy. A WSGI application should always consult the ``SERVER_NAME`` variable and not the separate header supplied by the proxy.

For passing through the apparent URL sub path of a web application, as mapped by the trusted proxy, the supported headers are:

* X-Script-Name
* X-Forwarded-Script-Name

You should select only one of these headers as the authoritative source for the apparent URL sub path of the web application as sent by the proxy. Never select multiple headers, because if you do, which one is used is indeterminate. The configuration might therefore be::

    WSGITrustedProxies 1.2.3.4
    WSGITrustedProxyHeaders X-Script-Name

With this configuration, when a request is received from the trusted proxy only the ``X-Script-Name`` header will be passed through to the WSGI application. This will be done following CGI convention as used by WSGI, namely in the ``HTTP_X_SCRIPT_NAME`` variable.

For this header, the ``SCRIPT_NAME`` variable passed to the WSGI application will be overridden with the value from the header supplied by the proxy. A WSGI application should always consult the ``SCRIPT_NAME`` variable and not the separate header supplied by the proxy.

The examples above show trusting a single header for a specific purpose at a time.
When you need to trust multiple headers for different purposes, you can list them separated by spaces using one instance of ``WSGITrustedProxyHeaders``:: WSGITrustedProxyHeaders X-Forwarded-For X-Forwarded-Host X-Forwarded-Port or in separate directives:: WSGITrustedProxyHeaders X-Forwarded-For WSGITrustedProxyHeaders X-Forwarded-Host WSGITrustedProxyHeaders X-Forwarded-Port As already highlighted you should only list one header for a specific purpose when there are multiple conventions for what header to use. Which you use will depend on the configuration of your proxy. You should only trust headers which are always set by the proxy, never trust headers which are optionally set by proxies because if not overridden by a proxy, a remote client could still supply the header. Also remember that in general you should not consult the proxied headers themselves, but instead consult the special variables set from those headers which are passed to the WSGI application and which are defined as being special to WSGI. As illustration of how such special variables are used, consider for example the notes in the WSGI specification around URL reconstruction. * https://peps.python.org/pep-3333/#url-reconstruction Finally, if using this feature to trust proxies and designated headers, do not enable in any WSGI framework or application separate functionality it may have for also processing the proxy headers. You should only rely on what mod_wsgi has done to update variables special to WSGI. mod_wsgi-5.0.0/docs/configuration.rst000066400000000000000000000025471452636074700177030ustar00rootroot00000000000000============= Configuration ============= .. toctree:: :maxdepth: 2 configuration-directives/WSGIAcceptMutex configuration-directives/WSGIAccessScript configuration-directives/WSGIApplicationGroup configuration-directives/WSGIAuthGroupScript configuration-directives/WSGIAuthUserScript configuration-directives/WSGICallableObject configuration-directives/WSGICaseSensitivity configuration-directives/WSGIChunkedRequest configuration-directives/WSGIDaemonProcess configuration-directives/WSGIImportScript configuration-directives/WSGILazyInitialization configuration-directives/WSGIPassAuthorization configuration-directives/WSGIProcessGroup configuration-directives/WSGIPythonEggs configuration-directives/WSGIPythonHome configuration-directives/WSGIPythonOptimize configuration-directives/WSGIPythonPath configuration-directives/WSGIRestrictEmbedded configuration-directives/WSGIRestrictProcess configuration-directives/WSGIRestrictSignal configuration-directives/WSGIRestrictStdin configuration-directives/WSGIRestrictStdout configuration-directives/WSGIScriptAlias configuration-directives/WSGIScriptAliasMatch configuration-directives/WSGIScriptReloading configuration-directives/WSGISocketPrefix configuration-directives/WSGITrustedProxies configuration-directives/WSGITrustedProxyHeaders mod_wsgi-5.0.0/docs/contributing.rst000066400000000000000000000134421452636074700175370ustar00rootroot00000000000000============ Contributing ============ The mod_wsgi package is a solo effort by Graham Dumpleton. The package is developed purely in the author's spare time and is not funded in any way by a company, nor is it developed for a specific companies requirements. In fact the author doesn't even develop it for his own needs. It is developed purely because it represents an interesting technical challenge and not because the author needs it himself to host a significant web site. 
How to make a donation ---------------------- If you use mod_wsgi and wish to show your appreciation, donations can be made via `PayPal `_ or an Amazon (USA store only) gift certificate sent to Graham.Dumpleton at gmail dot com. A suggested formula for how much to donate is: * If using mod_wsgi for personal use, then consider donating what you would pay for one months worth of a single host used to run your own site. * If using mod_wsgi for a company web site, then consider donating what you would pay for two months worth of a single host used to run that site. * If using mod_wsgi as part of a web hosting service which you then charge other people for using, then consider donating what you would pay for three months worth of a single host used to run that site. In other words, if you feel inclined, donate an amount commensurate with how much benefit you are getting from mod_wsgi. The reference to the cost of hosting is used at it reflects in some way how much you can afford or might be willing to pay for a hosting service yourself. On that basis, donations might realistically range from $5 up to $150 or more. Obviously where your company spends ridiculous amounts of money on web hosting you can instead elect to donate something more within the range stated above rather than how much you actually spend on web hosting services. Now for the reality, which is that it is very rare that a company will ever donate any money to an Open Source project. As such, when donations have occassionally been received (which doesn't happen very often), they are from individuals using mod_wsgi themselves. Some people do openly begrudge Open Source projects soliciting donations, but the amounts received overall are so insignificant in comparison to how much effort is generally put into projects and what a developer would need to survive that anything received is more a symbolic gesture, more than anything else, of ones appreciation. Given that donations invariably are from individuals, do know that they are accepted with much gratitude and appreciation in return that you are at least, even if companies aren't, trying to help support Open Source projects in some way. How else can you donate ----------------------- If you are an author of a book related to Apache, Python, Docker or any other technologies which go into providing web hosting services, then will also happily accept an electronic copy of the book for reference. Still don't think a monetary contribution is something you would do, you can also simply send a Twitter message to the author expressing your appreciation. You will be surprised how far positive encouragement and appreciation can go with people who work on Open Source projects. This is because in part satisfaction comes from knowing people are benefiting from the work being done. If you never do or say anything, then Open Source developers will never know that you do appreciate the work they do, so don't be quiet when an Open Source project is of value to you, at least say 'Thank You'. How are donations used ---------------------- Any monetary donations typically go towards buying clothes, toys, music, books and apps for the authors 2 children. They are therefore used as a special treat for the authors kids. Source code contributions ------------------------- You might be thinking, what about source code contributions. 
Although it would be great for this project to grow to have multiple developers working on the code and documentation, reality is that working inside of Apache and the Python C APIs is quite specialised. It isn't therefore the most attractive of projects in that regard. If however you are keen, then would love to hear from you. Open Source free loaders ------------------------ If you are the sort of person who thinks that the Internet exists only to provide you with free stuff and where you think everyone out there exists purely to help you work out your problems, then it may be better that you go use some other WSGI server project. Even if you don't contribute as described above, if you at least recognise that other people are giving up their time to help you and that you put in some effort yourself to resolve a problem first, and then explain it properly in some detail to others when seeking help, providing answers to any questions asked of you, then you will still be helped. The worst sort of people, which hopefully you don't want to be one of, are those who simply say something is broken but will not provide sufficient details, thereby forcing other people to waste huge amounts of time dragging out the information required to help you, or having to guess what your problem is. It is people in this latter category which are becoming a significant drain on the time of developers of Open Source projects and which are a part of why so many Open Source developers are experiencing burnout. So if you are the sort to expect people to help you, complain about things when the problem is really your own unwillingness to learn, and generally give nothing positive in return, even if only encouragement, then don't expect to be helped. Your like has caused too much damage in the past already to any number of Open Source projects and will not be tolerated here. The mental health of Open Source developers is more important than you are. mod_wsgi-5.0.0/docs/finding-help.rst000066400000000000000000000064111452636074700173720ustar00rootroot00000000000000============ Finding Help ============ If after you have gone through all the available documentation you still cannot work out how to do something or can't resolve a problem you are having, use the mod_wsgi mailing list to post your question. The mailing list is hosted by Google Groups at: * http://groups.google.com/group/modwsgi You do not need to have a Google email account as Google Groups allows you to register external email addresses as well. Please use the mailing list in preference to raising a ticket in the issue tracker, unless you are somewhat certain that the problem is a bug in mod_wsgi and not just some environment issue related to your application, any third party packages being used or the operating system. It is much easier to have a discussion on the mailing list than the issue tracker. The mailing list also has many people participating, or at least reading, so you have people with a broad experience with many third party Python web packages and operating systems who may be able to help. If the problem is potentially more an issue with a third party package or the operating system rather than mod_wsgi, you might also consider asking on any mailing list related to the third party package instead. A final option is to ask your question on StackOverflow, if a programming question, or ServerFault, if an administration issue. 
These sites allow a broad range of questions about many topics with quite a large user base of sometimes knowledgeable people. Do be aware though that the only general forum that is monitored is the mod_wsgi mailing list, so use it if you want an informed answer for a mod_wsgi specific question. Remember that people on the mailing list are volunteering their time to help and don't get paid for answering questions. Thus, it is in your interest not to annoy them too much. No matter which forum you use, when asking questions, it is always helpful to detail the following: 1. Which version of mod_wsgi you are using and if using a packaged distribution, who provided the distribution. If you are not using the latest version, then upgrade first and verify the problem still occurs with the latest version. 2. Which version of Python you are using and if using a packaged distribution, who provided the distribution. 3. Which version of Apache you are using and if using a packaged distribution, who provided the distribution. If not using latest version of Apache available, then consider upgrading and trying again. 4. What operating system you are using. 5. Details on any third party packages being used and what versions of those packages. 6. The mod_wsgi configuration you are using from Apache configuration files. In particular you should indicate whether you are using mod_wsgi embedded mode or daemon mode. Also can be helpful to indicate what MPM Apache has been compiled for and whether mod_php or mod_python are being loaded into the same Apache instance. 7. Relevant error messages from the Apache error logs. Specifically, don't just quote the single line you think shows the error message. Instead, also show the lines before and after that point. These other lines from the error logs may show supplemental error messages from Apache or mod_wsgi or provide Python traceback information. mod_wsgi-5.0.0/docs/getting-started.rst000066400000000000000000000023371452636074700201360ustar00rootroot00000000000000=============== Getting Started =============== If starting out with mod_wsgi it is recommended you start out with a simple 'Hello World' type application. Do not attempt to use a Python web application dependent on a web framework such as Django, Flask or Pyramid until you have got a basic 'Hello World' application running first. The simpler WSGI application will validate that your mod_wsgi installation is working okay and that you at least understand the basics of configuring Apache. You can find a simple 'Hello World' WSGI application, along with setup instructions for the traditional way of setting up Apache and mod_wsgi, described in the :doc:`../user-guides/quick-configuration-guide`. For a bit more in-depth information and additional examples see the :doc:`../user-guides/configuration-guidelines`. Note that unless you are using Windows, where such a choice is not available, you should always use daemon mode of mod_wsgi. This is not the default mode, so you will need to ensure you follow the instructions to enable daemon mode. For a simpler way of running a Python WSGI application using mod_wsgi, also checkout ``mod_wsgi-express``, details of which can currently be found at: https://pypi.python.org/pypi/mod_wsgi mod_wsgi-5.0.0/docs/index.rst000066400000000000000000000032261452636074700161360ustar00rootroot00000000000000======== mod_wsgi ======== The mod_wsgi package implements a simple to use Apache module which can host any Python web application which supports the Python WSGI_ specification. 
The package can be installed in two different ways depending on your requirements. The first is as a traditional Apache module installed into an existing Apache installation. Following this path you will need to manually configure Apache to load mod_wsgi and pass through web requests to your WSGI application. The second way of installing mod_wsgi is to install it from PyPI_ using the Python ``pip`` command. This builds and installs mod_wsgi into your Python installation or virtual environment. The program ``mod_wsgi-express`` will then be available, allowing you to run up Apache with mod_wsgi from the command line with an automatically generated configuration. This approach does not require you to perform any configuration of Apache yourself. Both installation types are suitable for production deployments. The latter approach using ``mod_wsgi-express`` is the best solution if wishing to use Apache and mod_wsgi within a Docker container to host your WSGI application. It is also a better choice when using mod_wsgi during the development of your Python web application as you will be able to run it directly from your terminal. .. _WSGI: http://www.python.org/dev/peps/pep-3333/ .. _PyPI: http://pypi.python.org/pypi/mod_wsgi .. toctree:: :maxdepth: 1 :hidden: project-status security-issues getting-started requirements installation troubleshooting user-guides configuration finding-help reporting-bugs contributing source-code release-notes mod_wsgi-5.0.0/docs/installation.rst000066400000000000000000000017641452636074700175350ustar00rootroot00000000000000============ Installation ============ The mod_wsgi package can be installed from source code or may also be available as a pre built binary package as part of your Linux distribution. Do be aware though that Linux distributions generally ship out of date versions of mod_wsgi and for long term support (LTS) versions of Linux can be anything up to about 5 years old. Those older versions are not supported in any way even though they are part of a so called LTS version of Linux. If you want support and want to ensure you have the most up to date and bug free version of mod_wsgi, you should consider building and installing mod_wsgi from the source code. For instructions on how to compile mod_wsgi from the source code for UNIX like operating systems such as Linux and MacOS X see: * :doc:`user-guides/quick-installation-guide` * :doc:`user-guides/installation-on-macosx` If you are on Windows, you should instead use: * https://github.com/GrahamDumpleton/mod_wsgi/blob/develop/win32/README.rst mod_wsgi-5.0.0/docs/make.bat000066400000000000000000000150611452636074700157020ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. 
epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. xml to make Docutils-native XML files echo. pseudoxml to make pseudoxml-XML files for display purposes echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) %SPHINXBUILD% 2> nul if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\mod_wsgi.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\mod_wsgi.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdf" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. goto end ) if "%1" == "latexpdfja" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex cd %BUILDDIR%/latex make all-pdf-ja cd %BUILDDIR%/.. echo. echo.Build finished; the PDF files are in %BUILDDIR%/latex. 
goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) if "%1" == "xml" ( %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml if errorlevel 1 exit /b 1 echo. echo.Build finished. The XML files are in %BUILDDIR%/xml. goto end ) if "%1" == "pseudoxml" ( %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml if errorlevel 1 exit /b 1 echo. echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. goto end ) :end mod_wsgi-5.0.0/docs/project-status.rst000066400000000000000000000015371452636074700200210ustar00rootroot00000000000000============== Project Status ============== The mod_wsgi project is still being developed and maintained. The available time of the sole developer is however limited. As a result, progress may appear to be slow. In general, the documentation is in a bit of a mess right now and somewhat outdated, so if you can't find something then ask on the mod_wsgi mailing list for help. Also check out the :doc:`release-notes` as they at least are being updated. A lot of the more recent changes are being made with the aim of making it a lot easier to deploy Apache with mod_wsgi in Docker based environments. Changes included the ability to install mod_wsgi using ``pip``, along with an admin command called ``mod_wsgi-express`` which provides a really simple way of starting up Apache and mod_wsgi from the command line with an automatically generated configuration. mod_wsgi-5.0.0/docs/release-notes.rst000066400000000000000000000064331452636074700176000ustar00rootroot00000000000000============= Release Notes ============= .. 
toctree:: :maxdepth: 2 release-notes/version-5.0.0 release-notes/version-4.9.4 release-notes/version-4.9.3 release-notes/version-4.9.2 release-notes/version-4.9.1 release-notes/version-4.9.0 release-notes/version-4.8.0 release-notes/version-4.7.1 release-notes/version-4.7.0 release-notes/version-4.6.8 release-notes/version-4.6.7 release-notes/version-4.6.6 release-notes/version-4.6.5 release-notes/version-4.6.4 release-notes/version-4.6.3 release-notes/version-4.6.2 release-notes/version-4.6.1 release-notes/version-4.6.0 release-notes/version-4.5.24 release-notes/version-4.5.23 release-notes/version-4.5.22 release-notes/version-4.5.21 release-notes/version-4.5.20 release-notes/version-4.5.19 release-notes/version-4.5.18 release-notes/version-4.5.17 release-notes/version-4.5.16 release-notes/version-4.5.15 release-notes/version-4.5.14 release-notes/version-4.5.13 release-notes/version-4.5.12 release-notes/version-4.5.11 release-notes/version-4.5.10 release-notes/version-4.5.9 release-notes/version-4.5.8 release-notes/version-4.5.7 release-notes/version-4.5.6 release-notes/version-4.5.5 release-notes/version-4.5.4 release-notes/version-4.5.3 release-notes/version-4.5.2 release-notes/version-4.5.1 release-notes/version-4.5.0 release-notes/version-4.4.23 release-notes/version-4.4.22 release-notes/version-4.4.21 release-notes/version-4.4.20 release-notes/version-4.4.19 release-notes/version-4.4.18 release-notes/version-4.4.17 release-notes/version-4.4.16 release-notes/version-4.4.15 release-notes/version-4.4.14 release-notes/version-4.4.13 release-notes/version-4.4.12 release-notes/version-4.4.11 release-notes/version-4.4.10 release-notes/version-4.4.9 release-notes/version-4.4.8 release-notes/version-4.4.7 release-notes/version-4.4.6 release-notes/version-4.4.5 release-notes/version-4.4.4 release-notes/version-4.4.3 release-notes/version-4.4.2 release-notes/version-4.4.1 release-notes/version-4.4.0 release-notes/version-4.3.2 release-notes/version-4.3.1 release-notes/version-4.3.0 release-notes/version-4.2.8 release-notes/version-4.2.7 release-notes/version-4.2.6 release-notes/version-4.2.5 release-notes/version-4.2.4 release-notes/version-4.2.3 release-notes/version-4.2.2 release-notes/version-4.2.1 release-notes/version-4.2.0 release-notes/version-4.1.3 release-notes/version-4.1.2 release-notes/version-4.1.1 release-notes/version-4.1.0 release-notes/version-4.0 release-notes/version-3.5 release-notes/version-3.4 release-notes/version-3.3 release-notes/version-3.2 release-notes/version-3.1 release-notes/version-3.0 release-notes/version-2.8 release-notes/version-2.7 release-notes/version-2.6 release-notes/version-2.5 release-notes/version-2.4 release-notes/version-2.3 release-notes/version-2.2 release-notes/version-2.1 release-notes/version-2.0 release-notes/version-1.6 release-notes/version-1.5 release-notes/version-1.4 release-notes/version-1.3 release-notes/version-1.2 release-notes/version-1.1 release-notes/version-1.0 mod_wsgi-5.0.0/docs/release-notes/000077500000000000000000000000001452636074700170405ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/release-notes/version-1.0.rst000066400000000000000000000002161452636074700215520ustar00rootroot00000000000000=========== Version 1.0 =========== Version 1.0 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.0.tar.gz mod_wsgi-5.0.0/docs/release-notes/version-1.1.rst000066400000000000000000000047111452636074700215570ustar00rootroot00000000000000=========== Version 1.1 =========== Version 1.1 of mod_wsgi can be 
obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.1.tar.gz Bug Fixes --------- 1. Fix bug which could result in processes crashing when multiple threads attempt to write to sys.stderr or sys.stdout at the same time. See: https://code.google.com/archive/p/modwsgi/issues/30 Chance of this occuring was small, as was contingent on code writing out strings which contained an embedded newline but no terminating new line, thereby triggering the internal line caching code. 2. In error case when not able to release interpreter, was wrongly trying to release Python GIL around code to unlock module mutex when didn't actually have the GIL acquired in the first place. Didn't strictly need to be releasing GIL when releasing lock as it shouldn't block anyway, so don't do this even in case where had the Python GIL. This problem would only have been encountered in situation where Python had failed in a major way to begin with. 3. Incorrectly trying to output Python exception details when Python GIL would not have been held. This problem would only have been encountered in situation where Python had failed in a major way to begin with. 4. Fix location of Python object reference count decrements to avoid decrement reference count on null pointer. Would only have caused a problem if Python was in some sort of corrupted state to begin with as the object which the reference count was being performed on should always exist. 5. Replace normal Apache connection setup in daemon processes with equivalent code that avoids possibility that other Apache modules will insert their own connection level input/output filters. This is needed as running WSGI applications in daemon processes where requests were arriving to Apache as HTTPS requests could cause daemon processes to crash. See: https://code.google.com/archive/p/modwsgi/issues/33 This was only occuring for some HTTPS configurations, but not known what exactly was different about those configurations to cause the problem. Actually possible that the real problem was mod_logio as described below. 6. Substitute optional ap_logio_add_bytes_out() function provided by the mod_logio module when loaded and when handling request in daemon process. This is needed to prevent core output filters calling this function and triggering a crash due to configuration for mod_logio not being setup. See: https://code.google.com/archive/p/modwsgi/issues/34 mod_wsgi-5.0.0/docs/release-notes/version-1.2.rst000066400000000000000000000072201452636074700215560ustar00rootroot00000000000000=========== Version 1.2 =========== Version 1.2 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.2.tar.gz Bug Fixes --------- 1. When headers are flushed by mod_wsgi is not strictly compliant with the WSGI specification. In particular the specification says: The start_response callable must not actually transmit the response headers. Instead, it must store them for the server or gateway to transmit only after the first iteration of the application return value that yields a non-empty string, or upon the application's first invocation of the write() callable. In other words, response headers must not be sent until there is actual body data available, or until the application's returned iterable is exhausted. (The only possible exception to this rule is if the response headers explicitly include a Content-Length of zero.) In mod_wsgi when an iterable was returned from the application, the headers were being flushed even if the string was empty. 
See: https://code.google.com/archive/p/modwsgi/issues/35 2. Calling start_response() a second time to supply exception information and status to replace prior response headers and status, was resulting in a process crash when there had actually been response content sent and the existing response headers and status flushed and written back to the client. See: https://code.google.com/archive/p/modwsgi/issues/36 3. Added additional logging to highlight instance where WSGI script file was removed in between the time that Apache matched request to it and the WSGI script file was loaded and the request passed to it. These changes also log something if the attempt to stat the WSGI script file in the daemon process fails due to inadequate permissions or other reasons. 4. Fixed a few instances where logging via request object before fake request object in daemon process had been constructed properly. The particular cases would only have been triggered if something other than mod_wsgi code with Apache child process had tried to communicate with the daemon process. 5. Fixed problem when Apache 1.3 or 2.0 was being used, where the automatically determined default for the application group (interpreter) name would be wrong where the URL had repeating slashes in it after the leading portion of the URL which mapped to the mount point of the WSGI application. See: https://code.google.com/archive/p/modwsgi/issues/39 In particular, for a URL with the repeating slash the application group name would have a trailing slash appended when it shouldn't. The consequences of this are that two instances of the WSGI application could end up being loaded into the same process, doubling the memory usage for the process. Besides the additional memory use, this would in general not be an issue as most applications would be designed to work within multi process environment of Apache. If however a specific application was designed to only work within a single process (interpreter instance), as would occur when Windows was being used, or a single daemon process with daemon mode, then there may be issues as requests which had a repeating slash in the URL would not access the same application data as those without. Note, this problem could only arise where WSGIApplicationGroup directive wasn't used and thus default value being used. Or the value '%{RESOURCE}' was specified as argument to WSGIApplicationGroup, this being the same as the default. 6. Fixed problem whereby status of sub processes created from mod_wsgi daemon processes were not being caught properly. This was because mod_wsgi was wrongly blocking SIGCHLD signal. See: https://code.google.com/archive/p/modwsgi/issues/38 mod_wsgi-5.0.0/docs/release-notes/version-1.3.rst000066400000000000000000000034121452636074700215560ustar00rootroot00000000000000=========== Version 1.3 =========== Version 1.3 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.3.tar.gz Bug Fixes --------- 1. Fix bug whereby mod_wsgi daemon process could hang when a request with content greater than UNIX socket buffer size, was directed at a WSGI application resource handler which in turn returned a response, greater than UNIX socket buffer size, without first consuming the request content. There were two aspects to this problem, the first is that the above would trigger that specific request to hang. 
Second was that at the point of the hang, the Python GIL hadn't been released, and so all other threads were blocked from running any Python code resulting in whole process effectively hanging. Code now correctly ensures that Python GIL is released prior to going into potentially blocking operation. Secondly, where mutual deadlock between Apache child process and mod_wsgi daemon process, timeout as defined by the standard Apache 'Timeout' directive will now kick in and remaining request content discarded by Apache child process so that thread in the daemon process can continue and break out of its hung state. Although this can still result in request thread being in a hung state until the timeout occurs, this mirrors exactly what would happen if running a WSGI application using a CGI-WSGI bridge behind Apache mod_cgi module. A better solution which would avoid the hung state altogether is still being investigated. Note that this scenario shouldn't ever eventuate for a correctly implemented and functioning web application, however it is feasible that it could be triggered as a result of spambots which attempt to POST data randomly to sites with the hope they find a wiki system with an unprotected comment system. mod_wsgi-5.0.0/docs/release-notes/version-1.4.rst000066400000000000000000000016011452636074700215550ustar00rootroot00000000000000=========== Version 1.4 =========== Version 1.4 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.4.tar.gz Bug Fixes --------- 1. A negative value for content length in response wasn't being rejected. Where invalid header was being returned in response original response status was being returned instead of a 500 error. 2. Fix bug which was resulting in logging destined for !VirtualHost !ErrorLog going missing or ending up in main Apache error log. https://code.google.com/archive/p/modwsgi/issues/79 Features Added -------------- 1. Optimise sending of WSGI environment across to daemon process by reducing number of writes to socket. For daemon mode and a simple hello world application this improves base performance by 40% moving it significantly closer to performance of embedded mode. This is a backport of change from version 2.0 of mod_wsgi. mod_wsgi-5.0.0/docs/release-notes/version-1.5.rst000066400000000000000000000010461452636074700215610ustar00rootroot00000000000000=========== Version 1.5 =========== Version 1.5 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.5.tar.gz Bug Fixes --------- 1. Fix bug where listener socket file descriptors for daemon processes were being leaked in Apache parent process on a graceful restart. Also fixes problem where UNIX listener socket was left in filesystem on both graceful restart and graceful shutdown. For details see: https://code.google.com/archive/p/modwsgi/issues/95 This is a backport of change from version 2.2 of mod_wsgi. mod_wsgi-5.0.0/docs/release-notes/version-1.6.rst000066400000000000000000000013461452636074700215650ustar00rootroot00000000000000=========== Version 1.6 =========== Version 1.6 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-1.6.tar.gz **Note that this is a quick followup to version 1.5 of mod_wsgi to rectify significant problem introduced by that release. You should therefore also refer to:** * :doc:`version-1.5`. Bug Fixes --------- 1. Fixed problem introduced in version 1.5 of mod_wsgi whereby use of daemon mode would cause CGI scripts to fail. 
It is quite possible that the bug could also have caused failures with other Apache modules that relied on registering of cleanup functions against Apache configuration memory pool. For details see: http://groups.google.com/group/modwsgi/browse_frm/thread/79a86f8faffe7dcf mod_wsgi-5.0.0/docs/release-notes/version-2.0.rst000066400000000000000000000536201452636074700215620ustar00rootroot00000000000000=========== Version 2.0 =========== Version 2.0 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.0.tar.gz Note that mod_wsgi 2.0 was originally derived from mod_wsgi 1.0. It has though all changes from later releases in the 1.X branch. Thus also see: * :doc:`version-1.1` * :doc:`version-1.2` * :doc:`version-1.3` Bug Fixes --------- 1. Work around bug in Apache where '100 Continue' response was sent as part of response content if no attempt to read request input before headers and response were generated. Features Changed ---------------- 1. The WSGICaseSensitivity directive can now only be used at global scope within the Apache configuration. This means that individual directories can not be designated as being case sensitive or not. For correct operation therefore, the path names of all script files should treat case the same, one cannot have a mixture. 2. How the WSGIPythonPath directive is interpreted has changed in that '.pth' files in the desiginated directories are honoured. See item 10 in new features section for more information. 3. Removed support for output buffering outside of WSGI specification. In other words, removed the WSGIOutputBuffering directive and associated code. If using a WSGI application which does poor buffering itself, to the extent that performance is affected, you will need to wrap it in a WSGI middleware component that does buffering on its behalf. Features Removed ---------------- 1. The 'Interpreter' option to WSGIReloadMechanism has been removed. This option for interpreter reloading was of limited practical value as many third party modules for Python aren't written in a way to cope with destruction of Python interpreters in a running process. The presence of the feature was just making it harder to implement various new features. 2. The WSGIPythonHome directive is no longer available on Windows systems as Python would ignore it anyway. 3. The WSGIPythonExecutable directive has been removed. This didn't work on Windows or MacOS X systems. On UNIX systems, the WSGIPythonHome directive should be used instead. Not known how one can achieve same on Windows systems. Features Added -------------- 1. The WSGIReloadMechanism now provides the 'Process' option for enabling process reloading when the WSGI script file is changed. Note that this only applies to WSGI script files used for WSGI applications which have been delegated to a mod_wsgi daemon process. Additionally, as of 2.0c5 the use of 'Process' option has been made the default for daemon mode processes. If specifically requiring existing default behaviour, the 'Module' option will need to be specified to indicate script file reloading. If this option is specified for WSGI application run in embedded mode within Apache child processes, the existing default behaviour of reloading just the script file will apply. For more details see: https://code.google.com/archive/p/modwsgi/wikis/ReloadingSourceCode 2. 
When application is running in embedded mode, and WSGIApacheExtensions directive is set to On, then a Python CObject reference is added to the WSGI application environment as 'apache.request_rec'. This can be passed to C extension modules and can be converted back to a reference to internal Apache request_rec structure thereby allow C extension modules to work against the internal Apache C APIs to implement special features. One example of such special extensions are the Python SWIG bindings for the Apache C API implemented in the separate 'ap_swig_py' package. Because SWIG is being used, and due to thread support within SWIG generated bindings possibly only being usable within the first Python interpreter instance created, it may be the case that the 'ap_swig_py' package an only be used when WSGIApplicationGroup has been set to '%{GLOBAL}'. The 'ap_swig_py' package has not yet been released and is still in development. The package can be obtained from the Subversion repository at: https://bitbucket.org/grahamdumpleton/apswigpy/wikis/Home With the SWIG binding for the Apache API, the intention is that many of the internal features of Apache would then be available. For example:: import apache.httpd, apache.http_core req = apache.httpd.request_rec(environ["apache.request_rec"]) root = apache.http_core.ap_document_root(req) Note that this feature is experimental and may be removed from a future version if insufficient interest in it or in developing SWIG bindings. 3. When Apache 2.0/2.2 is being used, Python script can now be provided to perform the role of an Apache auth provider. This would allow user authentication underlying HTTP Basic (2.0 and 2.2) or Digest (2.2 only) authentication schemes to be done by a Python web application. Do note though that at present the provided authentication script will always run in the context of the Apache child processes and can not be delegated to a distinct daemon process. Apache configuration for defining an auth provider for Basic authentication when using Apache 2.2 would be:: AuthType Basic AuthName "Top Secret" AuthBasicProvider wsgi WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi Require valid-user For Apache 2.0 it would be:: AuthType Basic AuthName "Top Secret" WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi AuthAuthoritative Off Require valid-user The 'auth.wsgi' script would then need to contain a 'check_password()' function with a sample as shown below:: def check_password(environ, user, password): if user == 'spy': if password == 'secret': return True return False return None If using Apache 2.2 and Digest authentication support is built into Apache, then that also may be used:: AuthType Digest AuthName "Top Secret" AuthDigestProvider wsgi WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi Require valid-user The name of the required authentication function for Digest authentication is 'get_realm_hash()'. The result of the function must be 'None' if the user doesn't exist, or a hash string encoding the user name, authentication realm and password:: import md5 def get_realm_hash(environ, user, realm): if user == 'spy': value = md5.new() # user:realm:password value.update('%s:%s:%s' % (user, realm, 'secret')) hash = value.hexdigest() return hash return None By default the auth providers are executed in context of first interpreter created by Python. This can be overridden using the 'application-group' option to the script directive. 
The namespace for authentication groups is shared with that for application groups defined by WSGIApplicationGroup. If mod_authn_alias is being loaded into Apache, then an aliased auth provider can also be defined:: WSGIAuthUserScript /usr/local/django/mysite/apache/auth.wsgi \ application-group=django WSGIScriptAlias / /usr/local/django/mysite/apache/django.wsgi Order deny,allow Allow from all WSGIApplicationGroup django AuthType Basic AuthName "Django Site" AuthBasicProvider django Require valid-user An authentication script for Django might then be something like:: import os, sys sys.path.append('/usr/local/django') os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings' from django.contrib.auth.models import User from django import db def check_password(environ, user, password): db.reset_queries() kwargs = {'username': user, 'is_active': True} try: try: user = User.objects.get(**kwargs) except User.DoesNotExist: return None if user.check_password(password): return True else: return False finally: db.connection.close() If the WSGIApacheExtensions directive is set to On then 'apache.request_rec' will be passed in 'environ' to the auth provider functions. This may be used in conjunction with C extension modules such as 'ap_swig_py'. For example, it may be used to set attributes in 'req.subprocess_env' which are then in turn passed to the WSGI application through the WSGI environment. Passing of these settings will occur even if the WSGI application itself is running in a daemon process. A further example where this can be useful is where which daemon process is used is dependent on some attribute of the user. For example, if using the Apache configuration:: WSGIDaemonProcess django-admin WSGIDaemonProcess django-users WSGIProcessGroup %{ENV:PROCESS_GROUP} which daemon process the request is delegated to can be controlled from the auth provider:: import apache.httpd def check_password(environ, user, password): db.reset_queries() kwargs = {'username': user, 'is_active': True} try: try: user = User.objects.get(**kwargs) except User.DoesNotExist: return None if user.check_password(password): req = apache.httpd.request_rec(environ["apache.request_rec"]) if user.is_staff: req.subprocess_env["PROCESS_GROUP"] = 'django-admin' else: req.subprocess_env["PROCESS_GROUP"] = 'django-users' return True else: return False finally: db.connection.close() For more details see: https://code.google.com/archive/p/modwsgi/wikis/AccessControlMechanisms 4. When Apache 2.2 is being used, now possible to provide a script file containing a callable which returns the groups that a user is a member of. This can be used in conjunction with a 'group' option to the Apache 'Require' directive. Note that up to mod_wsgi 2.0c3 the option was actually 'wsgi-group'. 
Apache configuration for defining an auth provider for Basic authentication and subsequent group authorisation would be:: AuthType Basic AuthName "Top Secret" AuthBasicProvider wsgi WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi WSGIAuthGroupScript /usr/local/wsgi/scripts/auth.wsgi Require group secret-agents Require valid-user The 'auth.wsgi' script would then need to contain a 'check_password()' and 'groups_for_user()' function with a sample as shown below:: def check_password(environ, user, password): if user == 'spy': if password == 'secret': return True return False return None def groups_for_user(environ, user): if user == 'spy': return ['secret-agents'] return [''] For more details see: https://code.google.com/archive/p/modwsgi/wikis/AccessControlMechanisms 5. Implemented WSGIDispatchScript directive. This directive can be used to designate a script file in which can be optionally defined any of the functions:: def process_group(environ): return "%{GLOBAL}" def application_group(environ): return "%{GLOBAL}" def callable_object(environ): return "application" This allows for the process group, application group and callable object name for a WSGI application to be programmatically defined rather than be exclusively drawn from the configuration. Each function if wishing to override the value defined by the configuration should return a string object. If None is returned then value defined by the configuration will still be used. By default the script file code will be executed within the context of the '%{GLOBAL}' application group within the Apache child processes (never in the daemon processes). The application group used can be overridden by defining the 'application-group' option to the script directive. Note that up to 2.0c3 the WSGIServerGroup directive was instead provided, but this has now been removed. This feature could be used as part of a mechanism for distributing requests across a number of daemon process groups, but always directing requests from a specific user to the same daemon process. 6. Implemented inactivity-timeout option for WSGIDaemonProcess directive. For example:: WSGIDaemonProcess trac processes=1 threads=15 \ maximum-requests=1000 inactivity-timeout=300 When this option is used, the daemon process will be shutdown, and thence restarted, after no request activity for the defined period (in seconds). The purpose of this option is to allow amount of memory being used by a process to be dropped back to the initial idle state level. This option would be used where the application delegated to the daemon process was used infrequently and thus it would be preferable to reclaim the memory when the application is not in use. 7. In daemon processes, the HOME environment variable is now overridden such that its initial value when a new Python sub interpreter is created is the same as the home directory of the user that the daemon process is running as. This is to give some certainty as to its value as otherwise the HOME environment variable may be that of the root user, a particular user, or the user that ran 'sudo' to start Apache. This is because HOME environment variable will be inherited from environment of user that Apache is started as and has no relationship to the user that the process is actually run as. Note that the HOME environment variable is not updated for embedded mode as this would change the environment of code running under different Apache modules, such as mod_php and mod_perl. 
Not seen as being good practice to modify the environment of other systems. Once consequence of the HOME environment variable being set correctly for daemon processes at least, is that the default location calculated for Python egg cache should then be correct. If running in embedded mode, would still be necessary to manually override Python egg cache location. 8. In daemon processes, the initial current working directory of the process will be set to the home directory of the user that the process runs as, or as specified by the 'home' option to the WSGIDaemonProcess directive. 9. Added 'stack-size' option to WSGIDaemonProcess so that per thread stack size can be overridden for processes in the daemon process group. This can be required on Linux where the default stack size for threads is the same as the default user process stack size, that being 8MB. When running in a VPS provided by a web hosting company, where they for some reason seem to take into consideration the virtual memory size as well as the resident memory size when calculating your process limits, it is better to drop the per thread stack size down to a value closer to 512KB. For example:: WSGIDaemonProcess example processes=2 threads=25 stack-size=524288 10. Added some direct support into mod_wsgi for virtual environments for Python such as virtualenv and workingenv. The first approach to configuration is to use WSGIPythonPath directive at global scope in apache configuration. For example:: # workingenv WSGIPythonPath /some/path/env/lib/python2.3 # virtualenv WSGIPythonPath /some/path/env/lib/python2.3/site-packages The path you have to specify is slightly different depending on whether you use workingenv or virtualenv packages. Previously the WSGIPythonPath directive would just override the ``PYTHONPATH`` environment variable. Instead it now calls ``site.addsitedir()`` for any specified directories, thus triggering the reading of any .pth files and the subsequent addition of further directories there specified to sys.path. Note that directories added with WSGIPythonPath only apply to applications running in embedded mode. If you want to specify directories for daemon processes, you can use the 'python-path' option to WSGIDaemonProcess. For example:: WSGIDaemonProcess turbogears processes=5 threads=1 \ user=site1 group=site1 maximum-requests=1000 \ python-path=/some/path/env/lib/python2.3/site-packages WSGIScriptAlias / /some/path/scripts/turbogears.wsgi WSGIProcessGroup turbogears WSGIApplicationGroup %{GLOBAL} WSGIReloadMechanism Process Do note that anything defined in the standard Python site-packages directories takes precedence over directories added using the mechanisms described above. Thus, if wanting to use these virtual environments all the time, your standard Python installation effectively needs to have an empty site-packages directory. Alternatively, on UNIX systems you can use the WSGIPythonHome directive to point to a virtual environment which contains an empty 'site-packages'. End result is that with these options, should be very easy to have different daemon process groups using different Python virtual environments without any fiddles having to be done in the WSGI script file itself. For more details see: https://code.google.com/archive/p/modwsgi/wikis/VirtualEnvironments 11. Added WSGIPythonEggs directive and corresponding 'python-eggs' option for WSGIDaemonProcess directive. 
These allow the location of the Python egg cache directory to be set for applications running in embedded mode or in the designated daemon processes. These options have the same effect as if the 'PYTHON_EGG_CACHE' environment variable had been set.

12. Implement 'deadlock-timeout' option for WSGIDaemonProcess for detecting Python programs that hold the GIL for extended periods, thus perhaps indicating that process has frozen or has become unresponsive. The default value for the timeout is 300 seconds.

13. Added support for providing an access control script. This equates to the access handler phase of Apache and would be used to deny access to a subset of URLs based on the details of the remote client. The path to the script is defined using the WSGIAccessScript directive::

       WSGIAccessScript /usr/local/wsgi/script/access.wsgi

   The name of the function that must exist in the script file is 'allow_access()'. It must return True or False::

       def allow_access(environ, host):
           return host in ['localhost', '::1']

   This function will always be executed in the context of the Apache child processes even if it is controlling access to a WSGI application which has been delegated to a daemon process. By default the function will be executed in the context of the main Python interpreter, ie., '%{GLOBAL}'. This can be overridden by using the 'application-group' option to the WSGIAccessScript directive::

       WSGIAccessScript /usr/local/wsgi/script/access.wsgi application-group=admin

   For more details see the documentation on Access Control Mechanisms: https://code.google.com/archive/p/modwsgi/wikis/AccessControlMechanisms

14. Added support for loading a script file at the time that process is first started. This would allow modules related to an application to be preloaded into an interpreter immediately rather than it only occurring when the first request arrives for that application. The directive for designating the script to load is WSGIImportScript. The directive can only be used at global scope within the Apache configuration. It is necessary to designate both the application group, and if daemon mode support is available, the process group::

       WSGIImportScript /usr/local/wsgi/script/import.wsgi \
           process-group=%{GLOBAL} application-group=django

14. Add "--disable-embedded" option to "configure" script so that ability to run a WSGI application in embedded mode can be disabled completely. Also added the directive WSGIRestrictEmbedded so that ability to run a WSGI application in embedded mode can be disabled easily if support for embedded mode is still compiled in.

15. Added support for optional WSGI extension wsgi.file_wrapper. On UNIX systems and when Apache 2.X is being used, if the wrapped file like object relates to a regular file then additional optimisations will be applied to improve the performance of returning the file in a response.

16. Added 'display-name' option for WSGIDaemonProcess. On operating systems where it works, this should allow displayed name of daemon process shown by 'ps' to be changed. Note that name will be truncated to whatever the existing length of 'argv[0]' was for the process.

17. When WSGI application generates more content than what was defined by response content length header, excess is discarded. If Apache log level is set to debug, messages will be logged to Apache error log file warning of when generated content length differs to specified content length.

18. Allow WSGIPassAuthorization to be used in .htaccess file if !FileInfo override has been set.
This has been allowed as !FileInfo enables ability to use both mod_rewrite and mod_headers, which both provide means of getting at the authorisation header anyway, so no point trying to block it. 19. Optimise sending of WSGI environment across to daemon process by reducing number of writes to socket. For daemon mode and a simple hello world application this improves base performance by 40% moving it significantly closer to performance of embedded mode. 20. Always change a HEAD request into a GET request. This is to ensure that a WSGI application always generates response content. If this isn't done then any Apache output filters will not get to see the response content and if they need to see the response content to generate headers based on it, then the response headers from a HEAD request would be incorrect and not match a GET request as required. If Apache 2.X, this will not however be done if there are no Apache output filters registered which could change the response headers or content. 21. Add option "send-buffer-size" and "receive-buffer-size" to WSGIDaemonProcess for controlling the send and receive buffer sizes of the UNIX socket used to communicate with mod_wsgi daemon processes. This is to work around or limit deadlock problems that can occur in certain cases when the operating system defines a very small default UNIX socket buffer size. 22. When no request content has been read and headers are to be sent back, force a zero length read in order to flush out any '100 Continue' response if expected by client. This is only done for 2xx and 3xx response status values. 23. A negative value for content length in response wasn't being rejected. Where invalid header was being returned in response original response status was being returned instead of a 500 error. mod_wsgi-5.0.0/docs/release-notes/version-2.1.rst000066400000000000000000000007731452636074700215640ustar00rootroot00000000000000=========== Version 2.1 =========== Version 2.1 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.1.tar.gz Bug Fixes --------- 1. Fix bug which was resulting in logging destined for !VirtualHost !ErrorLog going missing or ending up in main Apache error log. https://code.google.com/archive/p/modwsgi/issues/79 2. Fix bug where WSGI application returning None rather than valid iterable causes process to crash. https://code.google.com/archive/p/modwsgi/issues/88 mod_wsgi-5.0.0/docs/release-notes/version-2.2.rst000066400000000000000000000030421452636074700215550ustar00rootroot00000000000000=========== Version 2.2 =========== Version 2.2 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.2.tar.gz **Note: This version was quickly superseded by version 2.3 of mod_wsgi. Version 2.2 should not be used.** Features Changed ---------------- 1. Use official way of setting process names on FreeBSD, NetBSD and OpenBSD. For details see: https://code.google.com/archive/p/modwsgi/issues/90 This is a backport of change from version 3.0 of mod_wsgi. Bug Fixes --------- 1. Fix bug whereby if mod_python is loaded at same time as mod_wsgi the WSGIImportScript directive can cause Apache child processes to crash. For details see: https://code.google.com/archive/p/modwsgi/issues/91 2. Fix bug where mod_wsgi daemon process startup could fail due to old stale UNIX listener socket file as described in: https://code.google.com/archive/p/modwsgi/issues/77 3. 
Fix bug where listener socket file descriptors for daemon processes were being leaked in Apache parent process on a graceful restart. Also fixes problem where UNIX listener socket was left in filesystem on both graceful restart and graceful shutdown. For details see: https://code.google.com/archive/p/modwsgi/issues/95 4. Fix bug where response was truncated when a null character appeared as first character in block of data being returned from wsgi.file_wrapper. Only occurred when code fell back to using iteration over supplied file like object, rather than optimised method such as sendfile(). https://code.google.com/archive/p/modwsgi/issues/100 mod_wsgi-5.0.0/docs/release-notes/version-2.3.rst000066400000000000000000000022371452636074700215630ustar00rootroot00000000000000=========== Version 2.3 =========== Version 2.3 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.3.tar.gz **Note that this is a quick followup to version 2.2 of mod_wsgi to rectify significant problem introduced by that release. You should therefore also refer to:** * :doc:`version-2.2` Bug Fixes --------- 1. Fixed problem introduced in version 2.2 of mod_wsgi whereby use of daemon mode would cause CGI scripts to fail. It is quite possible that the bug could also have caused failures with other Apache modules that relied on registering of cleanup functions against Apache configuration memory pool. For details see: http://groups.google.com/group/modwsgi/browse_frm/thread/79a86f8faffe7dcf 2. When using setproctitle() on BSD systems, first argument should be a printf style format string with values to fill out per format as additional arguments. Code was supplying value to be displayed as format string which meant that if it contained any printf type format sequences, could cause process to crash as corresponding arguments wouldn't have ben provided. For details see: https://code.google.com/archive/p/modwsgi/issues/90 mod_wsgi-5.0.0/docs/release-notes/version-2.4.rst000066400000000000000000000167611452636074700215730ustar00rootroot00000000000000=========== Version 2.4 =========== Version 2.4 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.4.tar.gz Bug Fixes --------- 1. Compilation would fail on Windows due to daemon mode specific code not being conditionally compiled out on that platform. This was a problem introduced by changes in mod_wsgi 2.3. 2. Fix bug where wrong Apache memory pool used when processing configuration directives at startup. This could later result in memory corruption and may account for problems seen with 'fopen()' errors. See: https://code.google.com/archive/p/modwsgi/issues/78 https://code.google.com/archive/p/modwsgi/issues/108 3. Fix bug where Python interpreter not being destroyed correctly in Apache parent process on an Apache restart. This was resulting in slow memory leak into Apache parent process on each restart. This additional memory usage would then be inherited by all child processes forked from Apache parent process. Note that this change does not help for case where mod_python is also being loaded into Apache as in that case mod_python is responsible for intialising Python and in all available versions of mod_python it still doesn't properly destroy the Python interpreter either and so causes memory leaks which mod_wsgi cannot work around. Also, this doesn't solve problems with the Python interpreter itself leaking memory when destroyed and reinitialised. 
Such memory leaks in Python seem to occur for some versions of Python on particular platforms. For further details see: https://code.google.com/archive/p/modwsgi/issues/99 4. Fix bug whereby POST requests where 100-continue was expected by client would see request content actually truncated and not be available to WSGI application if application running in daemon mode. See: https://code.google.com/archive/p/modwsgi/issues/121 5. Fix bug where Apache optimisation related to keep alive connections can kick in when using wsgi.file_wrapper with result that if amount of data is between 255 and aproximately 8000 bytes, that a completely empty response will result. This occurs because Apache isn't flushing out the file data straight away but holding it over in case subsequent request on connection arrives. By then the file object used with wsgi.file_wrapper can have been closed and underlying file descriptor will not longer be valid. See: https://code.google.com/archive/p/modwsgi/issues/132 6. Modify how daemon process shutdown request is detected such that no need to block signals in request threads. Doing this caused problems in processes which were run from daemon mode process and which needed to be able to receive signals. New mechanism uses a internal pipe to which signal handler writes a character, with main thread performing a poll on pipe waiting for that character to know when to shutdown. For additional details see: https://code.google.com/archive/p/modwsgi/issues/87 7. Fix bug where excessive transient memory usage could occur when calling read() or readline() on wsgi.input with no argument. See: https://code.google.com/archive/p/modwsgi/issues/126 Note that calling read() with no argument is actually a violation of WSGI specification and any application doing that is not a WSGI compliant application. 8. Fix bug where daemon process would crash if User/Group directives were not specified prior to WSGIDaemonProcess in Apache configuration file. See: https://code.google.com/archive/p/modwsgi/issues/40 9. Fix bug whereby Python exception state wasn't being cleared correctly when error occurred in loading target of WSGIImportScript. See: https://code.google.com/archive/p/modwsgi/issues/117 Features Changed ---------------- 1. No longer populate 'error-notes' field in Apache request object notes table, with details of why WSGI script failed. This has been removed as information can be seen in default Apache multilanguage error documents. Because errors may list paths or user/group information, could be seen as a security risk. Features Added -------------- 1. Added 'mod_wsgi.version' to WSGI environment passed to WSGI application. For details see: https://code.google.com/archive/p/modwsgi/issues/93 2. Added 'process_group' and 'application_group' attributes to mod_wsgi module that is created within each Python interpreter instance. This allows code executed outside of the context of a request handler to know whether it is running in a daemon process group and what it may be called. Similarly, can determine if running in first interpreter or some other sub interpreter. For details see: https://code.google.com/archive/p/modwsgi/issues/27 3. Added closed and isatty attributes to Log object as well as close() method. For wsgi.errors these aren't required, but log object also used for stderr and stdout (when enabled) and code may assume these methods may exist for stderr and stdout. 
The closed and isatty attributes always yield false and close() will raise a run time error indicating that log cannot be closed. For details see: https://code.google.com/archive/p/modwsgi/issues/82 4. Apache scoreboard cleaned up when daemon processes first initialised to prevent any user code interfering with operation of Apache. For details see: https://code.google.com/archive/p/modwsgi/issues/104 5. When running configure script, can now supply additional options for CPPFLAGS, LDFLAGS and LDLIBS through environment variables. For details see: https://code.google.com/archive/p/modwsgi/issues/107 6. Better checking done on response headers and an explicit error will now be produce if name or value of response header contains an embedded newline. This is done as by allowing embedded newline would cause daemon mode to fail when handing response in Apache child process. In embedded mode, could allow application to pass back malformed response headers to client. For details see: https://code.google.com/archive/p/modwsgi/issues/81 7: Ensure that SYSLIBS linker options from Python configuration used when linking mod_wsgi Apache module. This is now prooving necessary as some Apache distributions are no longer linking system maths library and Python requires it. To avoid problem simply link against mod_wsgi Apache module and system libraries that Python needs. For details see: https://code.google.com/archive/p/modwsgi/issues/115 8: Reorder sys.path after having called site.addsitedir() in WSGIPythonPath and python-path option for WSGIDaemonProcess. This ensures that newly added directories get moved to front of sys.path and that they take precedence over standard directories. This in part avoids need to ensure --no-site-packages option used when creating virtual environments, as shouldn't have an issue with standard directories still overriding additions. For details see: https://code.google.com/archive/p/modwsgi/issues/112 9. Update USER, USERNAME and LOGNAME environment variables if set in daemon process to be the actual user that the process runs as rather than what may be inherited from Apache root process, which would typically be 'root' or the user that executed 'sudo' to start Apache, if they hadn't used '-H' option to 'sudo'. See: https://code.google.com/archive/p/modwsgi/issues/129 10. Build process now inserts what is believed to be the directory where Python shared library is installed, into the library search path before the Python config directory. This should negate the need to ensure that Python shared library is also symlink into the config directory next to the static library as linkers would normally expect it. See: https://code.google.com/archive/p/modwsgi/issues/136 mod_wsgi-5.0.0/docs/release-notes/version-2.5.rst000066400000000000000000000032561452636074700215670ustar00rootroot00000000000000=========== Version 2.5 =========== Version 2.5 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.5.tar.gz For Windows binaries see: https://code.google.com/archive/p/modwsgi/wikis/InstallationOnWindows Note that this release does not support Python 3.0. Python 3.0 will only be supported in mod_wsgi 3.0. Bug Fixes --------- 1. Change to workaround problem where correct version of Python framework isn't being found at run time and instead uses the standard system one, which may be the wrong version. 
Change is for those Python versions on MacOS X which include a .a in Python config directory, which should be symlinked to framework, link against the .a instead. For some reason, doing this results in framework then being picked up from the correct location. This problem may well have only started cropping up at some point due to a MacOS X Leopard patch update as has been noticed that Python frameworks installed previously stopped being found properly when mod_wsgi was subsequently recompiled against them. Something may therefore have changed in compiler tools suite. For more details see: https://code.google.com/archive/p/modwsgi/issues/28 2. Remove isatty from Log object used for stdout/stderr. It should have been a function and not an attribute. Even so, isatty() is not meant to be supplied by a file like object if it is associated with a file descriptor. Thus, packages which want to use isatty() are supposed to check for its existance before calling it. Thus wasn't ever mod_wsgi that was wrong in not supply this, but the packages which were trying to use it. For more details see: https://code.google.com/archive/p/modwsgi/issues/146 mod_wsgi-5.0.0/docs/release-notes/version-2.6.rst000066400000000000000000000051751452636074700215720ustar00rootroot00000000000000=========== Version 2.6 =========== Version 2.6 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.6.tar.gz For Windows binaries see: https://code.google.com/archive/p/modwsgi/wikis/InstallationOnWindows Note that this release does not support Python 3.0. Python 3.0 will only be supported in mod_wsgi 3.0. Note that the fix for (3) below is believed to have already been backported to mod_wsgi 2.5 in Debian Stable tree. Thus, if using mod_wsgi 2.5 from Debian you do not need to be concerned about upgrading to this version. Bug Fixes --------- 1. Fixed build issue on MacOS X where incorrect Python framework found at run time. This was caused by '-W,-l' option prefix being dropped from '-F' option in LDFLAGS of Makefile and not reverted back when related changes undone. This would affect Python 2.3 through 2.5. For more details see: https://code.google.com/archive/p/modwsgi/issues/28 2. Fixed build issue on MacOS X where incorrect Python framework found at run time. This was caused by '-L/-l' flags being used for versions of Python prior to 2.6. That approach, even where '.a' library link to framework exists, doesn't seem to work for the older Python versions. Because of the unpredictability as to when '-F/-framework' or '-L/-l' should be used for specific Python versions or distributions. Now always link against Python framework via '-F/-framework' if available. If for some particular setup this isn't working, then the '--disable-framework' option can be supplied to 'configure' script to force use of '-L/-l'. For more details see: https://code.google.com/archive/p/modwsgi/issues/28 3. Fixed bug where was decrementing Python object reference count on NULL pointer, causing a crash. This was possibly only occuring in embedded mode and only where closure of remote client connection was detected before any request content was read. The issue may have been more prevalent for a HTTPS connection from client. 4. Fixed bug for Python 2.X where when using 'print' to output multple objects to log object via, wsgi.errors, stderr or stdout, a space wasn't added to output between objects. This was occuring because log object lacked a softspace attribute. Features Changed ---------------- 1. 
When trying to determining version of Apache being used at build time, if Apache executable not available, fallback to getting version from the installed Apache header files. Do this as some Linux distributions build boxes do not actually have Apache executable itself installed, only the header files and apxs tool needed to build modules. For more details see: https://code.google.com/archive/p/modwsgi/issues/147 mod_wsgi-5.0.0/docs/release-notes/version-2.7.rst000066400000000000000000000012251452636074700215630ustar00rootroot00000000000000=========== Version 2.7 =========== Version 2.7 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.7.tar.gz Note that this release does not support Python 3.0. Python 3.0 will only be supported in mod_wsgi 3.0. Features Changed ---------------- 1. Set timeout on socket connection between Apache server child process and daemon process earlier to catch any blocking problems in initial handshake between the processes. This will make code more tolerant of any unexpected issues with socket communications. Bug Fixes --------- 1. Wasn't possible to set CFLAGS from environment variable when running the 'configure' script. mod_wsgi-5.0.0/docs/release-notes/version-2.8.rst000066400000000000000000000007541452636074700215720ustar00rootroot00000000000000=========== Version 2.8 =========== Version 2.8 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-2.8.tar.gz Bug Fixes --------- 1. Ensure that any compiler flags supplied via the CFLAGS environment variable when running 'configure' script are prefixed by '-Wc,' before being passed to 'apxs' to build module. Without this 'apxs' will incorrectly interpret the compiler options. For more details see: https://code.google.com/archive/p/modwsgi/issues/166 mod_wsgi-5.0.0/docs/release-notes/version-3.0.rst000066400000000000000000000452301452636074700215610ustar00rootroot00000000000000=========== Version 3.0 =========== Version 3.0 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-3.0.tar.gz Precompiled Windows binaries for Apache 2.2 and Python 2.6 and 3.1 are also available from: https://code.google.com/archive/p/modwsgi/downloads/list Note that mod_wsgi 3.0 was originally derived from mod_wsgi 2.0. It has though all changes from later releases in the 2.X branch. Thus also see: * :doc:`version-2.1` * :doc:`version-2.2` * :doc:`version-2.3` * :doc:`version-2.4` * :doc:`version-2.5` * :doc:`version-2.6` * :doc:`version-2.7` Bug Fixes --------- 1. Fix bug with quoting of options to mod_wsgi directives as described in: https://code.google.com/archive/p/modwsgi/issues/55 2. For any code not run in the first Python interpreter instance, thread local data was being thrown away at the end of the request, rather than persisting through to subsequent requests handled by the same thread. This prevented caching techniques which made use of thread local storage and where data was intended to persist for the life of the process. The result was that any such data would have had to have been recreated on every request. See: https://code.google.com/archive/p/modwsgi/issues/120 Features Changed ---------------- 1. No longer force a zero length read before sending response headers where Apache 2.2.8 or later is used. This was originally being done as a workaround because of bug in Apache whereby it didn't generate the '100 Continue' headers properly, with possibility they would be sent as part of response content. 
This problem was however fixed in Apache 2.2.7 (really 2.2.8 as 2.2.7 was never publicly released by the ASF). Also, only allow a zero length read to propagate to Apache input filters when it is the very first read against the input stream. For details see: https://code.google.com/archive/p/modwsgi/issues/52

2. The WSGIImportScript directive can now appear inside of a VirtualHost. However, there are now additional restrictions.

   First is that the WSGIDaemonProcess directive being referred to by the WSGIImportScript directive by way of the process-group option, must appear before the WSGIImportScript directive.

   Second is that the WSGIDaemonProcess directive being referred to by the WSGIImportScript directive by way of the process-group option, must appear in the same VirtualHost context, or at global server scope. It is not possible to reference a daemon process group specified in a different virtual server context.

   Third is that at global server context, it is not possible to refer to a daemon process group defined in a VirtualHost context.

   For additional details see: https://code.google.com/archive/p/modwsgi/issues/110

3. The restriction on accessing sys.stdin and sys.stdout has been lifted. This was originally done to promote the writing of portable WSGI code. In all, the campaign has failed as people can't be bothered to read the documentation to understand why it was done and instead use the workaround and don't actually fix the code that isn't portable. More details at: http://blog.dscpl.com.au/2009/04/wsgi-and-printing-to-standard-output.html

4. Re-enabled the WSGIPythonHome directive on Windows as it does apparently work so long as the virtual environment it refers to is set up correctly.

5. WSGI version now marked as WSGI 1.1 instead of 1.0. This is on the basis that proposed amendments to WSGI which mod_wsgi already implements will at least be accepted as WSGI 1.1, independent of any discussions of changing the WSGI interface to use unicode with an encoding other than Latin-1.

6. Set timeout on socket connection between Apache server child process and daemon process earlier to catch any blocking problems in initial handshake between the processes. This will make code more tolerant of any unexpected issues with socket communications.

Features Removed
----------------

1. The WSGIReloadMechanism directive has been removed. This means that script reloading is not available as an option in daemon mode and the prior default of process reloading is always used, unless of course WSGIScriptReloading is Off and all reloading is disabled. Doesn't affect embedded mode where script reloading was always the only option. For details see: https://code.google.com/archive/p/modwsgi/issues/72

2. There is no longer an attempt to set a Content-Length header for a response if not supplied and the iterable was a sequence of length 1. This was suggested by the WSGI specification but it turns out this causes problems with HEAD requests. For details see: http://blog.dscpl.com.au/2009/10/wsgi-issues-with-http-head-requests.html

   Note that Apache may still do the same thing in certain circumstances. Whether Apache always does the correct thing is not known. In general, a WSGI application should always return full response content for a HEAD request and should NOT truncate the response.

Features Added
--------------

1. Support added for using Python 3.X. What constitutes support for Python 3.X is described in: https://code.google.com/archive/p/modwsgi/wikis/SupportForPython3X

   Note that Python 3.0 is not supported and cannot be used.
You must use Python 3.1 or later as mod_wsgi relies on features only added in Python 3.1. The PSF has also affectively abandoned Python 3.0 now anyway. Also note that there is no official WSGI specification for Python 3.X and objections could be raised about what mod_wsgi has implemented. If that occurs then mod_wsgi may need to stop claiming to be WSGI compliant. 2. It is now possible to supply 'process-group', 'application-group', 'callable-object' and 'pass-authorization' configuration options to the WSGIScriptAlias and WSGIScriptAliasMatch directives after the location of the WSGI script file parameter. For example:: WSGIScriptAlias /trac /var/trac/apache/trac.wsgi \ process-group=trac-projects application-group=%{GLOBAL} Where the options are provided, these will take precedence over any which apply to the application as defined in Location or Directory configuration containers. For WSGIScriptAlias (but not WSGIScriptAliasMatch) where both 'process-group' and 'application-group' parameters are provided, and neither use expansion variables that can only be evaluated at the time of request handling, this will also cause the WSGI script file to be preloaded when the process starts, rather than being lazily loaded only when first request for application arrives. Preloading of the WSGI script is performed in the same way as when using the WSGIImportScript directive. The above configuration is therefore equivalent to existing, but longer way of doing it, as shown below:: WSGIScriptAlias /trac /var/trac/apache/trac.wsgi WSGIImportScript /var/trac/apache/trac.wsgi \ process-group=trac-projects application-group=%{GLOBAL} WSGIProcessGroup trac-projects WSGIApplicationGroup %{GLOBAL} Note that the WSGIDaemonProcess directive defining the daemon process group being referred to by the process-group option must preceed the WSGIScriptAlias directive in the configuration file. Further, you can only refer to a daemon process group referred to in the same VirtualHost context, or at global server scope. 3. When client closes connection and iterable returned from WSGI application being processed, now directly log message at debug level in log files, rather than raising a Python exception and with that being logged at error level as was previously the case. For where write() being called a Python exception still has to be raised and whether that results in any message being logged depends on what the WSGI application does. End result is that for normal case where LogLevel wouldn't be set to debug, the log file will not fill up with messages where client prematurely closes connection. For details see: https://code.google.com/archive/p/modwsgi/issues/29 4. Added new 'chroot' option to WSGIDaemonProcess directive to force daemon process to run inside of a chroot environment. For this to work you need to have a working Python installation installed into the chroot environment such that inside of that context it appears at same location as that which Apache/mod_wsgi is running. Note that the WSGI application code and any files it require have to be located within the chroot directory structure. In configuring mod_wsgi reference is then made to the WSGI application at that location. 
Thus:: WSGIDaemonProcess chroot-1 user=grahamd group=staff display-name=%{GROUP} \ root=/some/path/chroot-1 WSGIScriptAlias /app /some/path/chroot-1/var/www/app/scripts/app.wsgi \ process-group=chroot-1 Normally this would result in Apache generating SCRIPT_FILENAME as the path given as the second argument to WSGIScriptAlias, but mod_wsgi, knowing it is a chroot environment, will adjust that path and drop the chroot directory root from the front of the path so that it resolves correctly when used in the context of the chroot environment. In other words, there is no need to create a parallel directory structure outside of the chroot environment just to satisfy the Apache URL mapper. Any static files can be in or outside of the chroot directory and will still be served by Apache child worker processes, which don't run in the chroot environment. If the user only has access to the chroot environment through a login shell that goes directly to it, then static files will obviously be inside. How to create a chroot environment will not be described here and you will want to know what you are doing if you want to use this feature. For some pointers to what may need to be done for Debian/Ubuntu see article at: http://transcyberia.info/archives/12-chroot-plone-buildouts.html For details on this change also see: https://code.google.com/archive/p/modwsgi/issues/106 5. Added WSGIPy3kWarningFlag directive when Python 2.6 being used. This should be at server scope outside of any VirtualHost and will apply to whole server:: WSGIPy3kWarningFlag On This should have the same effect as the -3 option to the 'python' executable. For more details see: https://code.google.com/archive/p/modwsgi/issues/109 6. Fix up how Python thread state API is used to avoid internal Python assertion error when Python compiled with Py_DEBUG preprocessor symbol. For details see: https://code.google.com/archive/p/modwsgi/issues/113 7. Now allow chunked request content. Such content will be dechunked and available for reading by the WSGI application. See: https://code.google.com/archive/p/modwsgi/issues/1 To enable this feature, you must use:: WSGIChunkedRequest On for appropriate context in Apache configuration. Do note however that WSGI is technically incapable of supporting chunked request content without all chunked request content having to be first read in and buffered. This is because WSGI requires CONTENT_LENGTH be set when there is any request content. In mod_wsgi no buffering is done. Thus, to be able to read the request content in the case of a chunked transfer encoding, you need to step outside of the WSGI specification and do things it says you aren't meant to. You have two choices for how you can do this. The first choice you have is to call read() on wsgi.input but not supply any argument at all. This will cause all request content to be read in and returned. The second is to loop on calling read() on wsgi.input with a set block size passed as argument and do this until read() returns an empty string. Because both calling methods are not allowed under the WSGI specification, in using these your code will not be portable to other WSGI hosting mechanisms. 8. Values for HTTP headers now passed in environment dictionary to access, authentication and authorisation hooks. See: https://code.google.com/archive/p/modwsgi/issues/69 9. The flag wsgi.run_once is now set to True when running in daemon mode and both threads and maximum-requests is set to 1. With this configuration, you are guaranteed that the process will only be used once before being restarted.
Note that don't get this gaurantee when multiple threads used as the maximum requests is only checked at end of successful request and so could feasibly still have multiple concurrent requests in progress at that point and so process wasn't used only once. 10. Added lazy initialisation of Python interpreter. That is, Python interpreter will not be initialised in Apache parent process and inherited across fork when creating child processes. Instead, the Python interpreter will only first be initialised in child process after the fork. This behaviour is now the default as Python 3.X by design doesn't cleanup memory when interpreter destroyed. This causes significant memory leaks into Apache parent process as not reclaiming the memory doesn't work well with fact that Apache will unload Python library on an Apache restart and loose references to that unclaimed memory, such that when Python is reinitialised, it can't reuse it. In Python 2.X it does attempt to reclaim all memory when Python interpreter is destroyed, but some Python versions still leak some memory due to real leaks or also perhaps by design as per Python 3.X. In Python 2.X the leaks are far less significant and have been tolerated in the past. The leaks in Python 2.X only cause problems if you do lots of Apache restarts, rather than stop/start. All the same, default for Python 2.X has also now been made to perform lazy initialisation. To control the behaviour have added the directive WSGILazyInitialization. This defaults to On for both Python 2.X and Python 3.X. If you wish to experiment with whether early initialisation gives better results for Python 2.X, you can set this directive to Off. The downside of performing lazy initialisation is that you may loose some benefit of being able to share memory between child process. Thus, child processes will potentially consume more resident memory than before due to data being local to process rather than potentially being shared. If you are exclusively using mod_wsgi daemon mode and not using embedded mode, if lazy initialisation is used in conjunction with WSGIRestrictEmbedded being set to On, then the Python interpreter will not be initialised at all in the Apache server child processes, unless authentication providers or other non content generation code is being provided to be executed in Apache server child processes. This means that Apache worker processes will be much smaller. Even when initialisation of Python in Apache worker processes is disabled, as before, the mod_wsgi daemon processes will still use more resident memory over shared memory. If however you are only running a small number of mod_wsgi daemon processes, then this may overall balance out as using less memory in total. For more details see: https://code.google.com/archive/p/modwsgi/issues/99 11. If daemon process defined in virtual host which has its own error log, then associated stderr with that virtual hosts error log instead. This way any messages sent direct to stderr from C extension modules will end up in the virtual host error log that the daemon process is associated with, rather than the main error log. 12. If daemon process defined in a virtual host, close all error logs for other virtual hosts which don't reference the same error log. This ensures that code can't write messages to error logs for another host, or reopen the log and read data from the logs. 13. Implement internal server redirection using Location response header as allowed for in CGI specification. 
Note though that this feature has only been implemented for mod_wsgi daemon mode. See: https://code.google.com/archive/p/modwsgi/issues/14 14. Implement WSGIErrorOverride directive which when set to On will result in Apache error documents being used rather than those passed back by the WSGI application. This allows error documents to match any web site that the WSGI application may be integrated as a part of. This feature is akin to the ProxyErrorOverride directive of Apache but for mod_wsgi only. Do note though that this feature has only been implemented for mod_wsgi daemon mode. See: https://code.google.com/archive/p/modwsgi/issues/57 15. Implement WSGIPythonWarnings directive as equivalent to the 'python' executable '-W' option. The directive can be used at global scope in Apache configuration to provide warning control strings to disable messages produced by the warnings module. For example:: # Ignore everything. WSGIPythonWarnings ignore or:: # Ignore only DeprecationWarning. WSGIPythonWarnings ignore::DeprecationWarning:: For more details see: https://code.google.com/archive/p/modwsgi/issues/137 16. Added cpu-time-limit option to WSGIDaemonProcess directive. This allows one to define a time in seconds which will be the maximum amount of cpu time the process is allowed to use before a shutdown is triggered and the daemon process restarted. The point of this is to provide some means of controlling potentially run away processes due to bad code that gets stuck in heavy processing loops. For more details see: https://code.google.com/archive/p/modwsgi/issues/21 17. Added cpu-priority option to WSGIDaemonProcess directive. This allows one to adjust the CPU priority associated with processes in a daemon process groups. The range of values that can be supplied is dictated by what the setpriority() function on your particular operating system accepts. Normally this is in the range of about -20 to 20, with 0 being normal. For more details see: https://code.google.com/archive/p/modwsgi/issues/142 18. Added WSGIHandlerScript directive. This allows one to nominate a WSGI script file that should be executed as a handler for a specific file type as configured within Apache. For example:: WSGIProcessGroup bobo WSGIApplicationGroup %{GLOBAL} MultiViewsMatch Handlers Options +ExecCGI AddHandler bobo-script .bobo WSGIHandlerScript bobo-script /some/path/bobo-handler/handler.wsgi For this example, the application within the WSGI script file will be invoked whenever a URL maps to a file with '.bobo' extension. The name of the file mapped to by the URL will be available in the 'SCRIPT_FILENAME' WSGI environment variable. Although same calling interface is used as a WSGI application, to distinguish that this is acted as a handler, the application entry point must be called 'handle_request' and not 'application'. When providing such a handler script, it is also possible to provide in the script file a 'reload_required' callable object. This will be called prior to handling a request and allows the script to determine if a reload should be performed first. In the case of daemon mode, this allows script to programmatically determine if the whole process should be reloaded first. The argument to the 'reload_required' function is the original resource file that was the target of the request and which would have been available to the handler as SCRIPT_FILENAME. 
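To make the shape of such a handler script more concrete, a minimal sketch is shown below. Only the 'handle_request' and 'reload_required' names are dictated by mod_wsgi; everything else in the sketch, including the modification time check used to decide whether a reload is needed, is purely illustrative::

    import os
    import time

    # Remember when this handler script was loaded. A real handler might
    # instead track the files it actually depends upon.
    _loaded = time.time()

    def handle_request(environ, start_response):
        # SCRIPT_FILENAME is the file the URL mapped to, for example a
        # '.bobo' file when using the AddHandler example above.
        target = environ.get('SCRIPT_FILENAME', '')
        output = ('handled %s\n' % target).encode('UTF-8')
        status = '200 OK'
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]

    def reload_required(resource):
        # 'resource' is the original file that was the target of the
        # request. Ask for a reload if it changed after this script was
        # loaded.
        try:
            return os.path.getmtime(resource) > _loaded
        except OSError:
            return False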
mod_wsgi-5.0.0/docs/release-notes/version-3.1.rst000066400000000000000000000014621452636074700215610ustar00rootroot00000000000000=========== Version 3.1 =========== Version 3.1 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-3.1.tar.gz As this version follows on quickly from mod_wsgi 3.0, ensure you read: * :doc:`version-3.0` Bug Fixes --------- 1. Ensure that any compiler flags supplied via the CFLAGS environment variable when running 'configure' script are prefixed by '-Wc,' before being passed to 'apxs' to build module. Without this 'apxs' will incorrectly interpret the compiler options. For more details see: https://code.google.com/archive/p/modwsgi/issues/166 Features Changed ---------------- 1. Now give more explicit error message when compilation fails due to the Apache or Python developer header files not being installed. See: https://code.google.com/archive/p/modwsgi/issues/169 mod_wsgi-5.0.0/docs/release-notes/version-3.2.rst000066400000000000000000000032331452636074700215600ustar00rootroot00000000000000=========== Version 3.2 =========== Version 3.2 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-3.2.tar.gz Bug Fixes --------- 1. The path of the handler script was reported wrongly when WSGIHandlerScript was being used and an error occurred when loading the file. Rather than the handler script file being listed, the file to which the URL mapped was reported instead. 2. Fix problem with use of condition variables/thread mutexes that was causing all requests in daemon mode on a FreeBSD system to hang immediately upon Apache being started. https://code.google.com/archive/p/modwsgi/issues/176 Also use a distinct flag with condition variable in case condition variable is triggered even though condition not satisfied. This latter issue hasn't presented as a known problem, but technically a condition variable can by definition return even though not satisified. If this were to occur, undefined behaviour could result as multiple threads could listen on socket and/or accept connections on that socket at the same time. 3. Wrong check of APR_HAS_THREADS by preprocessor conditional resulting in code not compiling where APR_HAS_THREADS was defined but 0. 4. When Apache error logging redirected to syslog there is no error log associated with Apache server data structure to close. Code should always check that there is an error log to avoid crashing mod_wsgi daemon process on startup by operating on null pointer. See: https://code.google.com/archive/p/modwsgi/issues/178 5. Code was not compiling with Apache 2.3. This is because ap_accept_lock_mech variable was removed. See: https://code.google.com/archive/p/modwsgi/issues/186 mod_wsgi-5.0.0/docs/release-notes/version-3.3.rst000066400000000000000000000037261452636074700215700ustar00rootroot00000000000000=========== Version 3.3 =========== Version 3.3 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-3.3.tar.gz Bug Fixes --------- 1. Inactivity timeout not triggered at correct time when occurs for first request after process is started. See https://code.google.com/archive/p/modwsgi/issues/182 2. Back off timer for failed connections to daemon process group wasn't working correctly and no delay on reconnect attempts was being applied. See: https://code.google.com/archive/p/modwsgi/issues/195 3. Logging not appearing in Apache error log files when using daemon mode and have multiple virtual hosts against same server name. 
See: https://code.google.com/archive/p/modwsgi/issues/204 4. Eliminate logging of !KeyError exception in threading module when processes are shutdown when using Python 2.6.5 or 3.1.2 or later. This wasn't indicating any real problem but was annoying all the same. See: https://code.google.com/archive/p/modwsgi/issues/197 5. Fix potential for crash when logging error message resulting from failed group authorisation. 6. Fix compilation problems with Apache 2.3.6. Features Changed ---------------- 1. When compiled against ITK MPM for Apache, if using daemon mode, the listener socket for daemon process will be marked as being owned by the same user that daemon process runs. This will at least allow a request handled under ITK MPM to be directed to daemon process owned by same user as script. See issue: https://code.google.com/archive/p/modwsgi/issues/187 2. Add isatty() to log objects used for sys.stdout/sys.stderr and wsgi.errors. The Python documentation says 'If a file-like object is not associated with a real file, this method should not be implemented'. That however is ambiguous as to whether one can omit it, or whether one should raise an NotImplementedError exception. Either way, various code doesn't cope with isatty() not existing or failing, so implement it and have it return False to be safe. mod_wsgi-5.0.0/docs/release-notes/version-3.4.rst000066400000000000000000000230721452636074700215650ustar00rootroot00000000000000=========== Version 3.4 =========== Version 3.4 of mod_wsgi can be obtained from: http://modwsgi.googlecode.com/files/mod_wsgi-3.4.tar.gz Security Issues --------------- 1. Information disclosure via Content-Type response header. (CVE-2014-0242) The issue was identified and fixed in version 3.4 (August 2012) of mod_wsgi and is listed below at item 7 under 'Bugs Fixed'. Response Content-Type header could be corrupted when being sent in multithreaded configuration and embedded mode being used. Problem thus affected Windows and worker MPM on UNIX. At the time it was believed to be relatively benign, only ever having been seen with one specific web application (Trac - http://trac.edgewall.org), with the corrupted value always appearing to be replaced with a small set of known values which themselves did not raise concerns. A new example of this problem was identified May 2014 which opens this issue up as being able to cause arbitrary corruption of the web server HTTP response Content-Type value, resulting in possible exposure of data from the hosted web application to a HTTP client. The new example also opens the possibility that the issue can occur with any Apache MPM and not just multithreaded MPMs as previously identified. Albeit that it still requires some form of background application threads to be in use, when a single threaded Apache MPM is being used. In either case, it is still however restricted to the case where embedded mode of mod_wsgi is being used. The specific scenario which can trigger the issue is where the value for the Content-Type response header is dynamically generated, and where the stack frame where the calculation was done went out of use between the time that the WSGI start_response() function was called and the first non empty byte string was yielded from the WSGI application for the response, resulting in the Python object being destroyed and memory returned to the free list. 
At the same time, it would have been necessary for a parallel request thread or an application background thread to execute during that window of time and perform sufficient object allocations so as to reuse the memory previously used by the value of the Content-Type response header. Example code which can be used to trigger the specific scenario can be found at: https://gist.github.com/GrahamDumpleton/14b31ebe18166a89b090 That example code also provides a workaround if you find yourself affected by the issue but cannot upgrade straight away. It consists of the @intern_content_type decorator/wrapper. This can be applied to the WSGI application entry point and will use a cache to store the value of the Content-Type response header to ensure it is persistent for the life of the request. Bugs Fixed ---------- 1. If using write() function returned by start_response() and a non string value is passed to it, then process can crash due to errors in Python object reference counting in error path of code. 2. If using write() function returned by start_response() under Python 3.X and a Unicode string is passed to it rather than a byte string, then a memory leak will occur because of errors in Python object reference counting. 3. Debug level log message about mismatch in content length generated was generated when content returned less than that specified by Content-Length response header even when exception occurring during response generation from an iterator. In the case of an exception occuring, was only meant to generate the log message if more content returned than defined by the Content-Length response header. 4. Using writelines() on wsgi.errors was failing. 5. If a UNIX signal received by daemon mode process while still being initialised to signal that it should be shutdown, the process could crash rather than shutdown properly due to not registering the signal pipe prior to registering signal handler. 6. Python doesn't initialise codecs in sub interpreters automatically which in some cases could cause code running in WSGI script to fail due to lack of encoding for Unicode strings when converting them. The error message in this case was:: LookupError: no codec search functions registered: can't find encoding The 'ascii' encoding is now forcibly loaded when initialising sub interpreters to get Python to initialise codecs. 7. Response Content-Type header could be corrupted when being sent in multithreaded configuration and embedded mode being used. Problem thus affected Windows and worker MPM on UNIX. Features Changed ---------------- 1. The HTTPS variable is no longer set within the WSGI environment. The authoritative indicator of whether a SSL connection is used is wsgi.url_scheme and a WSGI compliant application should check for wsgi.url_scheme. The only reason that HTTPS was supplied at all was because early Django versions supporting WSGI interface weren't correctly using wsgi.url_scheme. Instead they were expecting to see HTTPS to exist. This change will cause non conformant WSGI applications to finally break. This possibly includes some Django versions prior to Django version 1.0. Note that you can still set HTTPS in Apache configuration using the !SetEnv or !SetEnvIf directive, or via a rewrite rule. In that case, that will override what wsgi.url_scheme is set to and once wsgi.url_scheme is set appropriately, the HTTPS variable will be removed from the set of variables passed through to the WSGI environment. 2. 
The wsgi.version variable has been reverted to 1.0 to conform to the WSGI PEP 3333 specification. It was originally set to 1.1 on expectation that revised specification would use 1.1 but that didn't come to be. 3. Use of kernel sendfile() function by wsgi.file_wrapper is now off by default. This was originally always on for embedded mode and completely disabled for daemon mode. Use of this feature can be enabled for either mode using WSGIEnableSendfile directive, setting it to On to enable it. The default is now off because kernel sendfile() is not always able to work on all file objects. Some instances where it will not work are described for the Apache !EnableSendfile directive. http://httpd.apache.org/docs/2.2/mod/core.html#enablesendfile Although Apache has use of sendfile() enabled by default for static files, they are moving to having it off by default in future version of Apache. This change is being made because of the problems which arise and users not knowing how to debug it and solve it. Thus also erring on side of caution and having it off by default but allowing more knowledgeable users to enable it where they know always using file objects which will work with sendfile(). New Features ------------ 1. Support use of Python 3.2. 2. Support use of Apache 2.4. 3. Is now guaranteed that mod_ssl access handler is run before that for mod_wsgi so that any per request variables setup by mod_ssl are available in the mod_wsgi access handler as implemented by WSGIAccessScript directive. 4. Added 'python-home' option to WSGIDaemonProcess allowing a Python virtual environment to be used directly in conjunction with daemon process. Note that this option does not do anything if setting WSGILazyInitialization to 'Off'. 5. Added 'lang' and 'locale' options to WSGIDaemonProcess to perform same tasks as setting 'LANG' and 'LC_ALL environment' variables. Note that if needing to do the same for embedded mode you still need to set the environment variables in the Apache envvars file or init.d startup scripts. 6. Split combined WWW-Authenticate header returned from daemon process back into separate headers. This is work around for some browsers which require separate headers when multiple authentication providers exist. 7. For Python 2.6 and above, the WSGIDontWriteBytecode directive can be used at global scope in Apache configuration to disable writing of all byte code files, ie., .pyc, by the Python interpreter when it imports Python code files. To disable writing of byte code files, set directive to 'On'. Note that this doesn't prevent existing byte code files on disk being used in preference to the corresponding Python code files. Thus you should first remove .pyc files from web application directories if relying on this option to ensure that .py file is always used. 8. Add supplementary-groups option to WSGIDaemonProcess to allow group membership to be overridden and specified comma separated list of groups to be used instead. 9. Add 'memory-limit' option to WSGIDaemonProcess to allow memory usage of daemon processes to be restricted. This will have no affect on some platforms as RLIMIT_AS/RLIMIT_DATA with setrlimit() isn't always implemented. For example MacOS X and older Linux kernel versions do not implement this feature. You will need to test whether this feature works or not before depending on it. 10. Add 'virtual-memory-limit' option to WSGIDaemonProcess to allow virtual memory usage of daemon processes to be restricted. 
This will have no affect on some platforms as RLIMIT_VMEM with setrlimit() isn't always implemented. You will need to test whether this feature works or not before depending on it. 11. Access, authentication and authorisation hooks now have additional keys in the environ dictionary for 'mod_ssl.is_https' and 'mod_ssl.var_lookup'. These equate to callable functions provided by mod_ssl for determining if the client connection to Apache used SSL and what the values of variables specified in the SSL certifcates, server or client, are. These are only available if Apache 2.0 or later is being used. 12. Add 'mod_wsgi.queue_start' attribute to WSGI environ so tools like New Relic can use it to track request queueing time. This is the time between when request accepted by Apache and when handled by WSGI application. mod_wsgi-5.0.0/docs/release-notes/version-3.5.rst000066400000000000000000000070511452636074700215650ustar00rootroot00000000000000=========== Version 3.5 =========== Version 3.5 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/3.5.tar.gz Security Issues --------------- 1. Local privilege escalation when using daemon mode. (CVE-2014-0240) The issue is believed to affect Linux systems running kernel versions >= 2.6.0 and < 3.1.0. The issue affects all versions of mod_wsgi up to and including version 3.4. The source of the issue derives from mod_wsgi not correctly handling Linux specific error codes from setuid(), which differ to what would be expected to be returned by UNIX systems conforming to the Open Group UNIX specification for setuid(). * http://man7.org/linux/man-pages/man2/setuid.2.html * http://pubs.opengroup.org/onlinepubs/009695399/functions/setuid.html This difference in behaviour between Linux and the UNIX specification was believed to have been removed in version 3.1.0 of the Linux kernel. * https://groups.google.com/forum/?fromgroups=#!topic/linux.kernel/u6cKf4D1D-k The issue would allow a user, where Apache is initially being started as the root user and where running code under mod_wsgi daemon mode as an unprivileged user, to manipulate the number of processes run by that user to affect the outcome of setuid() when daemon mode processes are forked and so gain escalated privileges for the users code. Due to the nature of the issue, if you provide a service or allow untrusted users to run Python web applications you do not control the code for, and do so using daemon mode of mod_wsgi, you should update mod_wsgi as soon as possible. Bugs Fixed ---------- 1. Python 3 installations can add a suffix to the Python library. So instead of ``libpythonX.Y.so`` it can be ``libpythonX.Ym.so``. 2. When using daemon mode, if an uncaught exception occurred when handling a request, when response was proxied back via the Apache child process, an internal value for the HTTP status line was not cleared correctly. This was resulting in a HTTP status in response to client of '200 Error' rather than '500 Internal Server Error'. Note that this only affected the status line and not the actual HTTP status. The status would still be 500 and the client would still interpret it as a failed request. 3. Null out Apache scoreboard handle in daemon processes for Apache 2.4 to avoid process crash when lingering close cleanup occurs. 4. Workaround broken MacOS X XCode Toolchain references in Apache apxs build configuration tool and operating system libtool script. 
This means it is no longer necessary to manually go into:: Applications/Xcode.app/Contents/Developer/Toolchains and manually add symlinks to define the true location of the compiler tools. 5. Restore ability to compile mod_wsgi source code under Apache 1.3. 6. Fix checks for whether the ITK MPM is used and whether ITK MPM specific actions should be taken around the ownership of the mod_wsgi daemon process listener socket. 7. Fix issue where when using Python 3.4, mod_wsgi daemon processes would actually crash when the processes were being shutdown. 8. Made traditional library linking the default on MacOS X. If needing framework style linking for the Python framework, then use the ``--enable-framework`` option. The existing ``--disable-framework`` has now been removed given that the default action has been swapped around. New Features ------------ 1. For Linux 2.4 and later, enable ability of daemon processes to dump core files when Apache ``CoreDumpDirectory`` directive used. 2. Attempt to log whether daemon process exited normally or was killed off by an unexpected signal. mod_wsgi-5.0.0/docs/release-notes/version-4.0.rst000066400000000000000000000011001452636074700215460ustar00rootroot00000000000000=========== Version 4.0 =========== Due to version 4.0 of mod_wsgi being in development for so long, or not in development depending on how you want to look at it, and the confusion that might be caused by releasing what was sitting in the source code repository after so long, the version 4.0 moniker has been dropped. The next version after the 3.X series will therefore be version 4.1.0. With the introduction of version 4.1.0, a switch is also being made to a X.Y.Z version numbering scheme, in place of the existing X.Y version numbering scheme. * :doc:`version-4.1.0` mod_wsgi-5.0.0/docs/release-notes/version-4.1.0.rst000066400000000000000000000367131452636074700217270ustar00rootroot00000000000000============= Version 4.1.0 ============= With version 4.1.0 of mod_wsgi, a switch to a X.Y.Z version numbering scheme from the existing X.Y scheme is being made. This is to enable a much quicker release cycle with more incremental changes. Version 4.1.0 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.1.0.tar.gz Note that mod_wsgi 4.1.0 was originally derived from mod_wsgi 3.1. It has though all changes from later releases in the 3.X branch. Thus also see: * :doc:`version-3.2` * :doc:`version-3.3` * :doc:`version-3.4` * :doc:`version-3.5` Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. If a UNIX signal received by daemon mode process while still being initialised to signal that it should be shutdown, the process could crash rather than shutdown properly due to not registering the signal pipe prior to registering signal handler. 2. Python doesn't initialise codecs in sub interpreters automatically which in some cases could cause code running in WSGI script to fail due to lack of encoding for Unicode strings when converting them. The error message in this case was:: LookupError: no codec search functions registered: can't find encoding The 'ascii' encoding is now forcibly loaded when initialising sub interpreters to get Python to initialise codecs. 3. 
Fixed reference counting bug under Python 3 in SSL ``var_lookup()`` function which can be used from an auth handler to look up SSL variables. 4. The ``WWW-Authenticate`` headers returned from a WSGI application when run under daemon mode are now always preserved as is. Because of previously using an internal routine of Apache, way back in time the values of multiple ``WWW-Authenticate`` headers would be merged when there was more than one. This would cause an issue with some browsers. A workaround was subsequently implemented above the Apache routine to break apart the merged header to create separate ones again, however, if the value of a header validly had a ',' in it, this would cause the header value to be broken apart where it wasn't meant to. This could issues with some type of ``WWW-Authenticate`` headers. Features Removed ---------------- 1. No longer support the use of mod_python in conjunction with mod_wsgi. When this is attempted an error is forced and Apache will not be able to start. An error message is logged in main Apache error log. 2. No longer support the use of Apache 1.3. Minimum requirement is now Apache 2.0. Features Changed ---------------- 1. Use of kernel ``sendfile()`` function by ``wsgi.file_wrapper`` is now off by default. This was originally always on for embedded mode and completely disabled for daemon mode. Use of this feature can be enabled for either mode using ``WSGIEnableSendfile`` directive, setting it to ``On`` to enable it. The default is now off because kernel ``sendfile()`` is not always able to work on all file objects. Some instances where it will not work are described for the Apache ``EnableSendfile`` directive. http://httpd.apache.org/docs/2.2/mod/core.html#enablesendfile Although Apache has use of ``sendfile()`` enabled by default for static files, they are moving to having it off by default in future version of Apache. This change is being made because of the problems which arise and users not knowing how to debug it and solve it. Thus also erring on side of caution and having it off by default but allowing more knowledgeable users to enable it where they know always using file objects which will work with ``sendfile()``. 2. The ``HTTPS`` variable is no longer set within the WSGI environment. The authoritative indicator of whether a SSL connection is used is ``wsgi.url_scheme`` and a WSGI compliant application should check for ``wsgi.url_scheme``. The only reason that ``HTTPS`` was supplied at all was because early Django versions supporting WSGI interface weren't correctly using ``wsgi.url_scheme``. Instead they were expecting to see ``HTTPS`` to exist. This change will cause non conformant WSGI applications to finally break. This possibly includes some Django versions prior to Django version 1.0. Note that you can still set ``HTTPS`` in Apache configuration using the ``SetEnv`` or ``SetEnvIf`` directive, or via a rewrite rule. In that case, that will override what ``wsgi.url_scheme`` is set to and once ``wsgi.url_scheme`` is set appropriately, the ``HTTPS`` variable will be removed from the set of variables passed through to the WSGI environment. 3. The ``wsgi.version`` variable has been reverted to 1.0 to conform to the WSGI PEP 3333 specification. It was originally set to 1.1 on expectation that revised specification would use 1.1 but that didn't come to be. 4. The ``inactivity-timeout`` option to ``WSGIDaemonProcess`` now only results in the daemon process being restarted after the idle timeout period where there are no active requests. 
Previously it would also interrupt a long running request. See the new ``request-timeout`` option for a way of interrupting long running, potentially blocked requests and restarting the process. 5. If the ``home`` option is used with ``WSGIDaemonProcess``, in addition to that directory being made the current working directory for the process, an empty string will be added to the start of the Python module search path. This causes Python to look in the current working directory for Python modules when they are being imported. This behaviour brings things into line with what happens when running the Python interpreter from the command line. You must though be using the ``home`` option for this to come into play. Do not that if your application then changes the working directory, it will start looking in the new current working directory and not that which is specified by the ``home`` option. This again mirrors what the normal Python command line interpreter does. New Features ------------ 1. Add ``supplementary-groups`` option to ``WSGIDaemonProcess`` to allow group membership to be overridden and specified comma separate list of groups used instead. 2. Add a ``graceful-timeout`` option to ``WSGIDaemonProcess``. This option is applied in a number of circumstances. When ``maximum-requests`` and this option are used together, when maximum requests is reached, rather than immediately shutdown, potentially interupting active requests if they don't finished with shutdown timeout, can specify a separate graceful shutdown period. If the all requests are completed within this time frame then will shutdown immediately, otherwise normal forced shutdown kicks in. In some respects this is just allowing a separate shutdown timeout on cases where requests could be interrupted and could avoid it if possible. When ``cpu-time-limit`` and this option are used together, when CPU time limit reached, rather than immediately shutdown, potentially interupting active requests if they don't finished with shutdown timeout, can specify a separate graceful shutdown period. 3. Add potentially graceful process restart option for daemon processes when sent a graceful restart signal. Signal is usually ``SIGUSR1`` but is platform dependent as using same signal as Apache would use. If the ``graceful-timeout`` option had been provided to ``WSGIDaemonProcess``, then the process will attempt graceful shutdown first based on the that timeout, otherwise normal shutdown procedure used as if received a ``SIGTERM``. 4. Add ``memory-limit`` option to ``WSGIDaemonProcess`` to allow memory usage of daemon processes to be restricted. This will have no affect on some platforms as ``RLIMIT_AS``/``RLIMIT_DATA`` with ``setrlimit()`` isn't always implemented. For example MacOS X and older Linux kernel versions do not implement this feature. You will need to test whether this feature works or not before depending on it. 5. Add ``virtual-memory-limit`` option to ``WSGIDaemonProcess`` to allow virtual memory usage of daemon processes to be restricted. This will have no affect on some platforms as ``RLIMIT_VMEM`` with ``setrlimit()`` isn't always implemented. You will need to test whether this feature works or not before depending on it. 6. Access, authentication and authorisation hooks now have additional keys in the environ dictionary for ``mod_ssl.is_https`` and ``mod_ssl.var_lookup``. 
These equate to callable functions provided by ``mod_ssl`` for determining if the client connection to Apache used SSL and what the values of variables specified in the SSL certifcates, server or client, are. These are only available if Apache 2.0 or later is being used. 7. For Python 2.6 and above, the ``WSGIDontWriteBytecode`` directive can be used at global scope in Apache configuration to disable writing of all byte code files, ie., .pyc, by the Python interpreter when it imports Python code files. To disable writing of byte code files, set directive to ``On``. Note that this doesn't prevent existing byte code files on disk being used in preference to the corresponding Python code files. Thus you should first remove ``.pyc`` files from web application directories if relying on this option to ensure that ``.py`` file is always used. 8. Add ``request-timeout`` option to ``WSGIDaemonProcess`` to allow a separate timeout to be applied on how long a request is allowed to run for before the daemon process is automatically restarted to interrupt the request. This is to counter the possibility that a request may become blocked on some backend service, thereby using up available requests threads and preventing other requests to be handled. In the case of a single threaded process, then the timeout will happen at the specified time duration from the start of the request being handled. Applying such a timeout in the case of a multithreaded process is more problematic as doing a restart when a single requests exceeds the timeout could unduly interfere with with requests which just commenced. In the case of a multi threaded process, what is instead done is to take the total of the current running time of all requests and divide that by the number of threads handling requests in that process. When this average time exceeds the time specified, then the process will be restarted. This strategy for a multithreaded process means that individual requests can actually run longer than the specified timeout and a restart will only be performed when the overall capacity of the processes appears to be getting consumed by a number of concurrent long running requests, or when a specific requests has been blocked for an excessively long time. The intent of this is to allow the process to still keep handling requests and only perform a restart when the available capacity of the process to handle more requests looks to be potentially on the decline. 9. Add ``connect-timeout`` option to ``WSGIDaemonProcess`` to allow a timeout to be specified on how long the Apache child worker processes should wait on being able to obtain a connection to the mod_wsgi daemon process. As UNIX domain sockets are used, connections should always succeed, however there have been some incidences seen which could only be explained by the operating system hanging on the initial connect call without being added to the daemon process socket listener queue. As such the timeout has been added. The timeout defaults to 15 seconds. This timeout also now dictates how long the Apache child worker process will attempt to get a connection to the daemon process when the connection is refused due to the daemon socket listener queue being full. Previously how long connection attempts were tried was based on an internal retry count rather than a configurable timeout. 10. 
Add ``socket-timeout`` option to ``WSGIDaemonProcess`` to allow the timeout on indvidual read/writes on the socket connection between the Apache child worker and the daemon process to be specified separately to the Apache ``Timeout`` directive. If this option is not specified, it will default to the value of the Apache ``Timeout`` directive. 11. Add ``queue-timeout`` option to ``WSGIDaemonProcess`` to allow a request to be aborted if it never got handed off to a mod_wsgi daemon process within the specified time. When this occurs a '503 Service Unavailable' response will be returned. This is to allow one to control what to do when backlogging of requests occurs. If the daemon process is overloaded and getting behind, then it is more than likely that a user will have given up on the request anyway if they have to wait too long. This option allows you to specify that a request that was queued up waiting for too long is discarded, allowing any transient backlog to be quickly discarded and not simply cause the daemon process to become even more backlogged. 12. Add ``listen-backlog`` option to ``WSGIDaemonProcess`` to allow the daemon process socket listener backlog size to be specified. By default this limit is 100, although this is actually a hint, as different operating systems can have different limits on the maximum value or otherwise treat it in special ways. 13. Add ``WSGIPythonHashSeed`` directive to allow Python behaviour related to initial hash seed to be overridden when the interpreter supports it. This is equivalent to setting the ``PYTHONHASHSEED`` environment variable and should be set to either ``random`` or a number in the range in range ``[0; 4294967295]``. 14. Implemented a new streamlined way of installing mod_wsgi as a Python package using a setup.py file or from PyPi. This includes a ``mod_wsgi-express`` script that can then be used to start up Apache/mod_wsgi with an auto generated configuration on port 8000. This makes it easy to run up Apache for development without interfering with the main Apache on the system and without having to worry about configuring Apache. Command line options can be used to override behaviour. Once the ``mod_wsgi`` package has been installed into your Python installation, you can run:: mod_wsgi-express start-server Then open your browser on the listed URL. This will verify that everything is working. Enter CTRL-C to exit the server and shut it down. You can now point it at a specific WSGI application script file:: mod_wsgi-express start-server wsgi.py For options run:: mod_wsgi-express start-server --help If you already have another web server running on port 8000, you can override the port to be used using the ``--port`` option:: mod_wsgi-express start-server wsgi.py --port 8001 15. Implemented a Django application plugin to add a ``runmodwsgi`` command to the Django management command script. This allows the automatic run up of the new mod_wsgi express script, with it hosting the Django web site the plugin was added to. To enable, once the ``mod_wsgi`` package has been installed into your Python installation, add ``mod_wsgi.server`` to the ``INSTALLED_APPS`` setting in your Django settings file. After having run the ``collectstatic`` Django management command, you can then run:: python manage.py runmodwsgi For options run:: python manage.py runmodwsgi --help To enable automatic code reloading in a development setting, use the option:: python manage.py runmodwsgi --reload-on-changes 16. 
The maximum size that a response header/value can be that is returned from a WSGI application under daemon mode can now be configured. The default size has also now been increased from 8192 bytes to 32768 bytes. The name of the option to ``WSGIDaemonProcess`` to set the buffer size used is ``header-buffer-size``. mod_wsgi-5.0.0/docs/release-notes/version-4.1.1.rst000066400000000000000000000011221452636074700217120ustar00rootroot00000000000000============= Version 4.1.1 ============= Version 4.1.1 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.1.1.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Compilation would fail on Apache 2.4 due to a change in the Apache API to determine the name of the MPM being used. mod_wsgi-5.0.0/docs/release-notes/version-4.1.2.rst000066400000000000000000000026621452636074700217250ustar00rootroot00000000000000============= Version 4.1.2 ============= Version 4.1.2 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.1.2.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The integration for Django management command was looking for the wrong name for the admin script to start mod_wsgi express. 2. The code which connected to the mod_wsgi daemon process was passing an incorrect size into the connect() call for the size of the address structure. On some Linux systems this would cause an error similar to:: (22)Invalid argument: mod_wsgi (pid=22944): Unable to connect to \ WSGI daemon process 'localhost:8000' on \ '/tmp/mod_wsgi-localhost:8000:12145/wsgi.22942.0.1.sock' This issue was only introduced in 4.1.0 and does not affect older versions. 3. The deadlock detection thread could try and acquire the Python GIL after the Python interpreter had been destroyed on Python shutdown resulting in the process crashing. This issue cannot be completely eliminated, but the deadlock thread will now at least check whether the flag indicating process shutdown is happening has been set before trying to acquire the Python GIL. mod_wsgi-5.0.0/docs/release-notes/version-4.1.3.rst000066400000000000000000000013261452636074700217220ustar00rootroot00000000000000============= Version 4.1.3 ============= Version 4.1.3 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.1.3.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The ``setup.py`` file wasn't always detecting the Python library version suffix properly when setting it up to be linked into the resulting ``mod_wsgi.so``. 
This would cause an error message at link time of:: /usr/bin/ld: cannot find -lpython mod_wsgi-5.0.0/docs/release-notes/version-4.2.0.rst000066400000000000000000000033511452636074700217200ustar00rootroot00000000000000============= Version 4.2.0 ============= Version 4.2.0 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.0.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. New Features ------------ 1. Added ``mod_wsgi.server_metrics()`` function which provides access to a dictionary of data derived from the Apache worker scoreboard. In effect this provides access to the same information that is used to create the Apache server status page. Note that if ``mod_status`` is not loaded into Apache, or the compile time configuration of Apache prohibits the scoreboard from being available, this function will return ``None``. Also be aware that only partial information about worker status, and no information about requests, will be returned if the ``ExtendedStatus`` directive is not also set to ``On``. Although ``mod_status`` needs to be loaded, it is not necessary to enable any URL to expose the server status page. 2. Added support for a platform plugin for New Relic to ``mod_wsgi-express`` which will report server status information up to New Relic if the ``--with-newrelic`` option is supplied when running mod_wsgi express. That same option also enables the New Relic Python agent. If you only want one or the other, you can instead use the ``--with-newrelic-agent`` and ``--with-newrelic-platform`` options. The feature of ``mod_wsgi-express`` for reporting data up to the New Relic Platform is dependent upon the separate ``mod_wsgi-metrics`` package being installed. mod_wsgi-5.0.0/docs/release-notes/version-4.2.1.rst000066400000000000000000000012171452636074700217200ustar00rootroot00000000000000============= Version 4.2.1 ============= Version 4.2.1 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.1.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The auto generated configuration would not work with an Apache installation where core Apache modules were statically compiled into Apache rather than being dynamically loaded. mod_wsgi-5.0.0/docs/release-notes/version-4.2.2.rst000066400000000000000000000016221452636074700217210ustar00rootroot00000000000000============= Version 4.2.2 ============= Version 4.2.2 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.2.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The ``envvars`` file was being overwritten even if it existed and had been modified. New Features ------------ 1. 
Output the location of the ``envvars`` file when using the ``setup-server`` command for ``mod_wsgi-express`` or if using the ``start-server`` command and the ``--envars-script`` option was being used. 2. Output the location of the ``apachectl`` script when using the ``setup-server`` command for ``mod_wsgi-express``. mod_wsgi-5.0.0/docs/release-notes/version-4.2.3.rst000066400000000000000000000011341452636074700217200ustar00rootroot00000000000000============= Version 4.2.3 ============= Version 4.2.3 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.3.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The feature for starting mod_wsgi express using the Django management command ``runmodwsgi`` was broken by the 4.2.2 release. mod_wsgi-5.0.0/docs/release-notes/version-4.2.4.rst000066400000000000000000000024731452636074700217300ustar00rootroot00000000000000============= Version 4.2.4 ============= Version 4.2.4 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.4.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Fixed one off error in applying limit to the number of supplementary groups allowed for a daemon process group. The result could be that if more groups than the operating system allowed were specified to the option ``supplementary-groups``, then memory corruption or a process crash could occur. 2. Improved error handling in setting up the current working directory and group access rights for a process when creating a daemon process group. The change means that if any error occurs that the daemon process group will be restarted rather than allow it to keep running with an incorrect working directory or group access rights. New Features ------------ 1. Added the ``--setup-only`` option to mod_wsgi express so that it is possible to create the configuration when using the Django management command ``runmodwsgi`` without actually starting the server. mod_wsgi-5.0.0/docs/release-notes/version-4.2.5.rst000066400000000000000000000012341452636074700217230ustar00rootroot00000000000000============= Version 4.2.5 ============= Version 4.2.5 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.5.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. When using Apache 2.4 with dynamically loaded MPM modules, mod_wsgi express was incorrectly trying to load more than one MPM module if more than one existed in the Apache modules directory. 
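As a rough illustration of how the ``setup-server`` command mentioned in the notes above is typically used (the WSGI script name, port and server root here are only examples), the configuration can be generated once and the server then managed through the reported ``apachectl`` script::

    mod_wsgi-express setup-server wsgi.py --port 8000 \
        --server-root /tmp/mod_wsgi-express-8000

    # The command reports where the 'apachectl' script and 'envvars'
    # file were written. The server is then controlled with that script.
    /tmp/mod_wsgi-express-8000/apachectl start
    /tmp/mod_wsgi-express-8000/apachectl stop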
mod_wsgi-5.0.0/docs/release-notes/version-4.2.6.rst000066400000000000000000000013661452636074700217300ustar00rootroot00000000000000============= Version 4.2.6 ============= Version 4.2.6 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.6.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Apache 2.2.3 and older do not provide the ap_get_server_description() function. Using mod_wsgi with such older versions would therefore cause processes to crash when Apache was being started up. For older versions of Apache, mod_wsgi now falls back to using ap_get_server_version() instead. mod_wsgi-5.0.0/docs/release-notes/version-4.2.7.rst000066400000000000000000000012201452636074700217200ustar00rootroot00000000000000============= Version 4.2.7 ============= Version 4.2.7 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.7.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. New Features ------------ 1. Added a ``--mount-point`` option to ``mod_wsgi-express`` to allow a WSGI application to be mounted at a sub URL rather than the root of the site when using mod_wsgi express. mod_wsgi-5.0.0/docs/release-notes/version-4.2.8.rst000066400000000000000000000034201452636074700217250ustar00rootroot00000000000000============= Version 4.2.8 ============= Version 4.2.8 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.8.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Disabled the feature for dumping stack traces on daemon process shutdown when a timeout occurs when using Python prior to 2.5. This is because the C API functions are not available in older Python versions. 2. If using Python 3.4 the minimum MacOS X version you can use is 10.8. This needs to be enforced because the Apache Runtime library has a definition in its header files which changes size between 10.7 and 10.8, and trying to compile for compatibility back to 10.6, as Python 3.4 tries to enforce, would cause mod_wsgi daemon mode processes to crash at runtime. 3. Python 3.3+ pyvenv style virtual environments would not work with mod_wsgi via the ``WSGIPythonHome`` directive or the ``home`` option to the ``WSGIDaemonProcess`` directive. This is because the support in Python for pyvenv will not work with embedded systems which set the equivalent of ``PYTHONHOME`` via the Python C API. The underlying problem in Python is described in issue: * http://bugs.python.org/issue22213 of the Python issue tracker. To support both normal virtualenv style virtual environments and pyvenv style virtual environments, the manner in which virtual environments are set up by mod_wsgi has been changed.
This has only been done on UNIX systems at this point however, as it isn't yet known whether the same trick will work on Windows systems. mod_wsgi-5.0.0/docs/release-notes/version-4.3.0.rst000066400000000000000000000250351452636074700217240ustar00rootroot00000000000000============= Version 4.3.0 ============= Version 4.3.0 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.3.0.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Performing authorization using the ``WSGIAuthGroupScript`` was not working correctly on Apache 2.4 due to changes in how auth providers and authentication/authorization work. The result could be that a user could gain access to a resource even though they were not in the required group. 2. Under Apache 2.4, when creating the ``environ`` dictionary for passing into access/authentication/authorisation handlers, the behaviour of Apache 2.4 as it pertained to the WSGI application, whereby it blocked the passing of any HTTP headers with a name which did not contain just alphanumerics or '-', was not being mirrored. This created the possibility of HTTP header spoofing in certain circumstances. Such headers are now being ignored. 3. When the ``home`` option was used with the ``WSGIDaemonProcess`` directive an empty string was added to ``sys.path``. This meant the current working directory would be searched. This was fine so long as the current working directory wasn't changed, but if it was, it would no longer look in the home directory. The actual home directory is now used instead. 4. Fixed Django management command integration so it would work for versions of Django prior to 1.6 where ``BASE_DIR`` didn't exist in the Django settings module. Features Changed ---------------- 1. In Apache 2.4, any headers with a name which does not include only alphanumerics or '-' are blocked from being passed into a WSGI application when the CGI like WSGI ``environ`` dictionary is created. This is a mechanism to prevent header spoofing when there are multiple headers where the only difference is the use of non alphanumerics in a specific character position. This protection mechanism from Apache 2.4 is now being retrospectively applied even when Apache 2.2 is being used and even though Apache itself doesn't do it. This may technically result in headers that were previously being passed, no longer being passed. The change is also technically against what the HTTP RFC says is allowed for HTTP header names, but such blocking would occur in Apache 2.4 anyway due to changes in Apache. It is also understood that other web servers such as nginx also perform the same type of blocking. Reliance on HTTP headers which use characters other than alphanumerics and '-' is therefore dubious as many servers will now discard them when needing to be passed into a system which requires the headers to be passed as CGI like variables such as is the case for WSGI. 2. In Apache 2.4, only ``wsgi-group`` is allowed when using the ``Require`` directive for group authorisation. In prior Apache versions ``group`` would also be accepted and matched by the ``wsgi`` auth provider. The inability to use ``group`` is due to a change in Apache itself and not mod_wsgi.
To avoid any issues going forward though, the mod_wsgi code will now no longer check for ``group`` even if for some reason Apache still decides to pass the authorisation check off to mod_wsgi even when it shouldn't. New Features ------------ 1. The value of the ``REMOTE_USER`` variable for an authenticated user when using ``Basic`` authentication can now be overridden from an authentication handler specified using the ``WSGIAuthUserScript``. To override the name used to identify the user, instead of returning ``True`` when indicating that the user is allowed, return the name to be used for that user as a string. That value will then be passed through in ``REMOTE_USER`` in place of any original value::

    def check_password(environ, user, password):
        if user == 'spy':
            if password == 'secret':
                return 'grumpy'
            return False
        return None

2. Added the ``--debug-mode`` option to ``mod_wsgi-express`` which results in Apache and the WSGI application being run in a single process which is left attached to stdin/stdout of the shell where the script was run. Only a single thread will be used to handle any requests. This feature enables the ability to interactively debug a Python WSGI application using the Python debugger (``pdb``). The simplest way to break into the Python debugger is by adding to your WSGI application code::

    import pdb; pdb.set_trace()

3. Added the ``--application-type`` option to ``mod_wsgi-express``. This defaults to ``script`` indicating that the target WSGI application provided to ``mod_wsgi-express`` is a WSGI script file defined by a relative or absolute file system path. In addition to ``script``, it is also possible to supply ``module`` and ``paste`` for the application type. For the case of ``module``, the target WSGI application will be taken to reside in a Python module with the specified name. This module will be loaded using the standard Python module import system and so must reside on the Python module search path. For the case of ``paste``, the target WSGI application will be taken to be a Paste deployment configuration file. In loading the Paste deployment configuration file, any WSGI application pipeline specified by the configuration will be constructed and the resulting top level WSGI application entry point returned will be used as the WSGI application. Note that the code file for the WSGI script file, Python module, or Paste deployment configuration file, if modified, will all result in the WSGI application being automatically reloaded on the next web request. 4. Added the ``--auth-user-script`` and ``--auth-type`` options to ``mod_wsgi-express`` to enable the hosted site to implement user authentication using either HTTP ``Basic`` or ``Digest`` authentication mechanisms. The ``check_password()`` or ``get_realm_hash()`` functions should follow the same form as if using the ``WSGIAuthUserScript`` directly with mod_wsgi when using manual configuration. 5. Added the ``--auth-group-script`` and ``--auth-group`` options to ``mod_wsgi-express`` to enable group authorization to be performed using a group authorization script, in conjunction with a user authentication script. The ``groups_for_user()`` function should follow the same form as if using the ``WSGIAuthGroupScript`` directly with mod_wsgi when using manual configuration. By default any user must be a member of the ``wsgi`` group. The name of this group though can be overridden using the ``--auth-group`` option. It is recommended that this be overridden rather than changing your own application to use the ``wsgi`` group. 6.
Added the ``--directory-index`` option to ``mod_wsgi-express`` to enable an index resource to be added to the document root directory which would take precedence over the WSGI application for the root page for the site. 7. Added the ``--with-php5`` option to ``mod_wsgi-express`` to enable the concurrent hosting of a PHP web application in conjunction with the WSGI application. Due to the limitations of PHP, this is currently only supported if using prefork MPM. 8. Added the ``--server-name`` option to ``mod_wsgi-express``. When this is used and set to the host name for the web site, a virtual host will be created to ensure that the server only accepts web requests for that host name. If the host name starts with ``www.`` then web requests will also be accepted against the parent domain, that is the host name without the ``www.``, but those requests will be automatically redirected to the specified host name on the same port as that used for the original request. When the ``--server-name`` option is being used, the ``--server-alias`` option can also be specified, multiple times if need be, to set up alternate names for the web site on which web requests should also be accepted. Wildcard aliases may be used in the name if wishing to match multiple sub domains in one go. If for some reason you do still need to be able to access the server via ``localhost`` when a virtual host for a set server name is being used, you can supply the ``--allow-localhost`` option. 9. Added the ``--rotate-logs`` option to ``mod_wsgi-express`` to enable log file rotation. By default the error log and access log, if enabled, will be rotated when they reach 5MB in size. To change the size at which the log files will be rotated, use the ``--max-log-size`` option. If the ``rotatelogs`` command is not being found properly, its location can be specified using the ``--rotatelogs-executable`` option. 10. Added the ``--ssl-port`` and ``--ssl-certificate`` options to ``mod_wsgi-express``. When both are set, with the latter being the stub path for the SSL certificate ``.crt`` and ``.key`` file, then HTTPS requests will be handled over the designated SSL port. When ``--https-only`` is supplied, any requests made over HTTP to the non SSL port will be automatically redirected so as to use a HTTPS connection over the SSL connection. Note that if using the ``--allow-localhost`` option, redirection from a HTTP to HTTPS connection will not occur when accessing via ``localhost``. 11. Added the ``--setenv`` option to ``mod_wsgi-express`` to enable request specific name/value pairs to be added to the WSGI environ dictionary. The values are restricted to string values. Also added a companion ``--passenv`` option to ``mod_wsgi-express`` to indicate the names of normal process environment variables which should be added to the per request WSGI environ dictionary. 12. Added the ``WSGIMapHEADToGET`` directive for overriding the previous behaviour of automatically mapping any ``HEAD`` request to a ``GET`` request when an Apache output filter was registered that may want to see the complete response in order to generate correct response headers. The directive can be set to be either ``Auto`` (the default), ``On`` which will always map a ``HEAD`` to ``GET`` even if no output filters are detected, and ``Off`` to always preserve the original request method type.
The original behaviour was to avoid problems with users trying to optimise for ``HEAD`` requests and then breaking caching mechanisms because the response headers for a ``HEAD`` request for a resource didn't match a ``GET`` request against the same resource as required by HTTP. If using mod_wsgi-express, the ``--map-head-to-get`` option can be used with the same values. 13. Added the ``--compress-responses`` option to ``mod_wsgi-express`` to enable compression of common text based responses such as plain text, HTML, XML, CSS and Javascript. mod_wsgi-5.0.0/docs/release-notes/version-4.3.1.rst000066400000000000000000000045011452636074700217200ustar00rootroot00000000000000============= Version 4.3.1 ============= Version 4.3.1 of mod_wsgi can be obtained from: https://github.com/GrahamDumpleton/mod_wsgi/archive/4.3.1.tar.gz Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. The ``install-module`` sub command of ``mod_wsgi-express`` was incorrectly trying to install the mod_wsgi ``.so`` file onto itself rather than into the Apache modules directory. 2. The workaround for the broken MacOS X Apache build scripts as implemented by the ``configure`` script used when building using the traditional make command wasn't working correctly for MacOS X 10.10 (Yosemite). In fixing this issue, the ``configure`` script has been enhanced such that it is now no longer necessary to have the whole of the Xcode package installed on MacOS X. Instead the minimum required now is the developer command line tools. If using Python and you wanted to be able to install Python packages which have a source code component, you would likely have already installed the developer command line tools. New Features ------------ 1. Added the ``--add-handler`` option to ``mod_wsgi-express`` to allow a WSGI application script file to be provided which is to handle any requests against static resources in the document root directory matching a specific extension type. 2. Added a mechanism to limit the amount of response content that can be buffered in the Apache child worker processes when proxying back the response from a request which had been handled in a mod_wsgi daemon process. This is to combat a lack of flow control within Apache 2.2 which can cause excessive amounts of memory usage as a result of such buffered content. This issue in Apache 2.2 was fixed in Apache 2.4, but the new mechanism is applied to both versions for consistency. The default maximum on the amount of buffered content is 65536 bytes. This can be increased by using the ``proxy-buffer-size`` option to the ``WSGIDaemonProcess`` directive or the ``--proxy-buffer-size`` option to ``mod_wsgi-express``. If using Apache 2.4, its own flow control mechanism may override the value in increasing the buffer size. mod_wsgi-5.0.0/docs/release-notes/version-4.3.2.rst000066400000000000000000000055101452636074700217220ustar00rootroot00000000000000============= Version 4.3.2 ============= Version 4.3.2 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.3.2 Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating.
As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Linux behaviour when using ``connect()`` on a non blocking UNIX socket and the listener queue is full, is apparently not POSIX compliant and it returns ``EAGAIN`` instead of ``ECONNREFUSED``. The code handling errors from the ``connect()`` wasn't accommodating this non standard behaviour and so would fail immediately rather than retrying. 2. Only change the working directory for a mod_wsgi daemon process after having dropped privileges to the target user. This is required where the specified working directory is on an NFS file system configured so as not to have root access privileges. 3. The workaround for getting pyvenv style virtual environments to work with Python 3.3+ would break brew Python 2.7 on MacOS X as brew Python appears to not work in embedded systems which use Py_SetProgramName() instead of using Py_SetPythonHome(). Py_SetProgramName() is now only used if it is detected that a pyvenv style virtual environment is actually being used. This even appears to be okay for brew Python 3.4 at least as it does still work with the Py_SetProgramName() call even if brew Python 2.7 doesn't. New Features ------------ 1. If the ``WSGIPythonHome`` directive or the ``python-home`` option is used with the ``WSGIDaemonProcess`` directive, the path provided, which is supposed to be the root directory of the Python installation or virtual environment, will be checked to see if it is actually accessible and refers to a directory. If it isn't, a warning message will be logged along with any details providing an indication of what may be wrong with the supplied path. This is being done to warn when an invalid path has been supplied that subsequently is likely to be rejected and ignored by the Python interpreter. In such a situation where an invalid path is supplied the Python interpreter doesn't actually log anything and will instead silently fall back to using any Python installation it finds by searching for ``python`` on the user's ``PATH``. This may not be the Python installation or virtual environment you intended to be used. 2. The Apache configuration snippet generated as an example when running the ``install-module`` sub command of ``mod_wsgi-express`` to install the ``mod_wsgi.so`` into the Apache installation itself, will now output a ``WSGIPythonHome`` directive for the Python installation or virtual environment the mod_wsgi module was compiled against so that the correct Python runtime will be used. mod_wsgi-5.0.0/docs/release-notes/version-4.4.0.rst000066400000000000000000000340151452636074700217230ustar00rootroot00000000000000============= Version 4.4.0 ============= Version 4.4.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.0 Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating. As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. When an exception occurs during the yielding of data from a generator returned from the WSGI application, and chunked transfer encoding was used on the response, then a '0' chunk would be erroneously added at the end of the response content even though the response was likely incomplete.
The result would be that clients wouldn't be able to properly detect that the response was truncated due to an error. This issue is now fixed for when embedded mode is being used. Fixing it for daemon mode is a bit trickier. 2. Response headers returned from the WSGI application running in daemon mode were being wrongly attached to the internal Apache data structure for ``err_headers_out`` instead of ``headers_out``. This meant that the ``Header`` directive of the ``mod_headers`` module, with its default condition of only checking ``onsuccess`` headers, would not work as expected. In order to be able to check for or modify the response headers one would have had to use the ``Header`` directive with the ``always`` condition and if also working with an embedded WSGI application, also define a parallel ``Header`` directive but with the ``onsuccess`` condition. For daemon mode, response headers will now be correctly associated with ``headers_out`` and the ``onsuccess`` condition of the ``Header`` directive. The only exception to this in either embedded or daemon mode now is that of the ``WWW-Authenticate`` header, which remains associated with ``err_headers_out`` so that the header will survive an internal redirect such as to an ``ErrorDocument``. 3. When optional support for chunked requests was enabled, it was only working properly for embedded mode. The feature now also works properly for daemon mode. The directive to enable support for chunked request content is ``WSGIChunkedRequest``. The command line option when using mod_wsgi express is ``--chunked-request``. This is an optional feature, as the WSGI specification is arguably broken in not catering properly for mutating input filters or chunked request content. Support for chunked request content could be enabled by default, but then WSGI applications which don't simply read all available content and instead rely entirely on ``CONTENT_LENGTH``, would likely see a chunked request as having no content at all, as it would interpret the lack of the ``CONTENT_LENGTH`` as meaning the length of the content is zero. An attempt to get the WSGI specification amended to be more sensible and allow what is a growing requirement to support chunked request content was ignored. Thus support is optional. You will need to enable this if you wish to rely on features of any WSGI framework that take the more sensible approach of ignoring ``CONTENT_LENGTH`` as a true indicator of content length. One such WSGI framework which provides some support for chunked request content is Flask/Werkzeug. Check its documentation or the code for Flask/Werkzeug to see if any additional ``SetEnv`` directive may be required to enable the support in Flask/Werkzeug. 4. Fixed a potential request content data corruption issue when running a WSGI application in daemon mode. The bug in the code is quite obvious, yet it could not be triggered on older mod_wsgi versions. It was though triggering quite easily in the current release on MacOS X, prior to it being fixed, due to the changes made to support chunked request content for daemon processes. It is suspected that it is still a latent bug in older mod_wsgi versions, but the conditions under which it would trigger must have been harder to induce. The lack of reported problems may have been aided by virtue of the Linux UNIX socket buffer size being quite large, in comparison to MacOS X, and so harder to create a condition where not all data could be written onto the UNIX socket in one call.
Yet, when buffer sizes for the UNIX socket on MacOS X were increased, it was still possible to induce the bug. 5. When the ``--working-directory`` option for ``mod_wsgi-express`` was given a relative path name, that wasn't being translated to an absolute path name when substituting the ``home`` option of ``WSGIDaemonProcess``, causing server startup to fail. 6. When using ``--debug-mode`` of ``mod_wsgi-express``, the working directory for the application was not being added to ``sys.path``. This meant that if the WSGI script was referenced from a different directory, any module imports for other modules in that directory would fail. Features Changed ---------------- 1. Until recently, a failed attempt to change the working directory for a daemon process to the home directory of the user the process runs as would be ignored. Now it will cause a hard failure that will prevent the daemon process from starting. This would cause issues where the user, usually the default Apache user, has no valid home directory. Now an attempt will only be made to change the working directory to the home directory of the user the daemon process runs as if the 'user' option had been explicitly set to define the user and the user is different to the user that Apache child worker processes run as. In other words, if it is different to the default Apache user. 2. The support for the ``wdb`` debugger was removed. It was decided that it wasn't mainstream enough and not ideal in that it still required a separate service and port to handle debugging sessions. New Features ------------ 1. Added a new feature to ``mod_wsgi-express`` implementing timeouts on the reading of the request, including headers, and the request body. This feature uses the Apache module ``mod_reqtimeout`` to implement the feature. By default a read timeout on the initial request including headers of 15 seconds is used. This can dynamically increase up to a maximum of 30 seconds if the request data is received at a minimum required rate. By default a read timeout on the request body of 15 seconds is used. This can dynamically increase if the request data is received at a minimum required rate. The options to override the defaults are ``--header-timeout``, ``--header-max-timeout``, ``--header-min-rate``, ``--body-timeout``, ``--body-max-timeout`` and ``--body-min-rate``. For a more detailed explanation of this feature, consult the documentation for the Apache ``mod_reqtimeout`` module. 2. Added a new ``%{HOST}`` label that can be used when specifying the application group (Python sub interpreter context) to run the WSGI application in, via the ``WSGIApplicationGroup`` directive, or the ``application-group`` option to ``WSGIScriptAlias``. This new label will result in an application group being used with a name that corresponds to the name of the site as identified by the HTTP request ``Host`` header. Where the accepting port number is other than 80 or 443, then the name of the application group will be suffixed with the port number separated by a colon. Note that extreme care must be exercised when using this new label to specify the application group. This is because the HTTP request ``Host`` header is under the control of the user of the site. As such, it should only be used in conjunction with a configuration which adequately blocks access to anything but the expected hosts. For example, it would be dangerous to use this inside of a ``VirtualHost`` where the ``ServerAlias`` directive is used with a wildcard.
This is because a user could pick arbitrary host names matching the wildcard and so force a new sub interpreter context to be created each time and so blow out memory usage. Similarly, caution should be exercised with ``mod_vhost_alias``, with any configuration forbidding any host which doesn't specifically match some specified resource such as a directory. Finally, this should probably never be used when not using either ``VirtualHost`` or ``mod_vhost_alias`` as in that case the server is likely going to accept any ``Host`` header value without exclusions. 3. Allow ``%{RESOURCE}``, ``%{SERVER}`` and ``%{HOST}`` labels to be used with the ``WSGIProcessGroup`` directive, or the ``process-group`` option of the ``WSGIScriptAlias`` directive. For this to work, it is still necessary to have setup an appropriate mod_wsgi daemon process group using the ``WSGIDaemonProcess`` directive, with name that will match the expanded value for the respective labels. If there is no matching mod_wsgi daemon process group specified, then a generic HTTP 500 internal server error response would be returned and the reason, lack of matching mod_wsgi daemon process group, being logged in the Apache error log. 4. Error messages and exceptions raised when there is a failure to read request content, or write back a response now provide the internal error indication from Apache as to why. For the ``IOError`` exceptions which are raised, that the exception originates within Apache/mod_wsgi is now flagged in the description associated with the exception. 5. When using mod_wsgi daemon mode and there is a timeout when reading request content in order to proxy it to the daemon process, a 408 request timeout HTTP response is now returned where as previously a generic 500 internal server error HTTP response was returned. Note that this doesn't mean that the WSGI application wasn't actually run. The WSGI application in the daemon process would have run as soon as the headers had been received. If the WSGI application had actually attempted to read the request content, it should also have eventually received an exception of type ``IOError`` when accessing ``wsgi.input`` to read the request content, due to a timeout or due to the proxy connection being closed before all request content was able to be read. If the WSGI application wasn't expecting any request content and had ignored it, even though some was present, it would still have run to completion and generated a response, but because the Apache child worker process was blocked waiting for content, when the timeout occurred the client would get the 408 HTTP response rather than the actual response generated by the WSGI application. 6. Added the ``--log-to-terminal`` option to ``mod_wsgi-express`` to allow the error log output to be directed to standard error for the controlling terminal, and the access log output, if enabled, to be directed to standard output. Similarly, the startup log output, if enabled, will be sent to standard error also. This should not be used in conjunction with ``--setup-only`` option when using the generated ``apachectl`` script, unless the ``-DFOREGROUND`` option is also being supplied to ``apachectl`` at the time it is run with the ``start`` command. 7. Added the ``--access-log-format`` option to ``mod_wsgi-express``. By default if the access log is enabled, entries will follow the 'common' log format as typically used by Apache. You have two options of how you can use the ``--access-log-format``. 
The first is to give it the argument 'combined', which will then cause it to use this alternate log format which is again often used with Apache. The other is to specify the log format string yourself. The format string can contain format string components as would be used with the ``LogFormat`` directive. For example, to specify the equivalent of the 'common' log format, you could use::

    --access-log-format "%h %l %u %t \"%r\" %>s %b"

This 'common' log format is identified via a nickname in the same way 'combined' is, so if you did have to specify it explicitly for some reason, you could just have instead used::

    --access-log-format common

8. Added the ``--newrelic-config-file`` and ``--newrelic-environment`` options to ``mod_wsgi-express``. This allows these to be set using command line options rather than requiring the New Relic environment variables. Importantly, when the options are used, the values will be embedded in the generated files if using ``--setup-only``. Thus they will still be set when later using the ``apachectl`` control script to start the server. Note that when these options are used, they will cause the equivalent New Relic environment variable for that option to be ignored, both if running the server immediately, or if using ``--setup-only`` and running the server later using ``apachectl``. 9. Added the ``--enable-debugger`` option to ``mod_wsgi-express``. When specified and at the same time the ``--debug-mode`` option is specified, then when an exception is raised from the initial execution of the WSGI application, when consuming the response iterable, or when calling any ``close()`` method of the response iterable, then post mortem debugging of the exception will be triggered. Post mortem debugging is performed using the Python debugger (pdb). 10. Added the ``--enable-coverage`` option to ``mod_wsgi-express``. When specified and at the same time the ``--debug-mode`` option is specified, then coverage analysis is enabled. When the server is exited, then the HTML reports will be output to the ``htmlcov`` directory under the server working directory, or the directory specified using the ``--coverage-directory`` option. The ``coverage`` module must be installed for this feature to work. 11. Added the ``--enable-profiler`` option to ``mod_wsgi-express``. When specified and at the same time the ``--debug-mode`` option is specified, then code profiling is enabled. When the server is exited, then the profiler data will be output to the ``pstats.dat`` file under the server working directory, or the file specified using the ``--profiler-output-file`` option. 12. Added the ``--python-path`` option to ``mod_wsgi-express`` to specify additional directories that should be added to the Python module search path. Note that these directories will not be processed for ``.pth`` files. If processing of ``.pth`` files is required, then the ``PYTHONPATH`` environment variable should be set and exported in a script file referred to using the ``--envvars-script`` option. mod_wsgi-5.0.0/docs/release-notes/version-4.4.1.rst000066400000000000000000000070701452636074700217250ustar00rootroot00000000000000============= Version 4.4.1 ============= Version 4.4.1 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.1 Known Issues ------------ 1. The makefiles for building mod_wsgi on Windows are currently broken and need updating.
As most new changes relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Bugs Fixed ---------- 1. Process crashes could occur when request content had been consumed by the WSGI application. The trigger was when the Python ``wsgi.input`` was still in existence after the web request had finished. The destruction of the ``wsgi.input`` object was accessing memory which had already been released back to the Apache memory pools and potentially reused. This could cause crashes or other unexplained behaviour. This issue was introduced in version 4.4.0 of mod_wsgi. Features Changed ---------------- 1. When an error occurs in writing back a response to the HTTP client, during the consumption of the iterable returned by the WSGI application, the message will now be logged at debug level rather than error level. Note that under Apache 2.2 it isn't possible to suppress the message generated by Apache itself from the core_output_filter, so that may still appear. 2. The ``--profiler-output-file`` option for ``mod_wsgi-express`` was changed to ``--profiler-directory`` and now refers to a directory, with individual pstats files being added to the directory for each session rather than reusing the same name all the time. New Features ------------ 1. Added the ``--server-mpm`` option to ``mod_wsgi-express``. With this option, if you are using Apache 2.4 with dynamically loadable MPM modules and more than one option for the MPM is available, you can specify your preference for which is used. If not specified, then the precedence order for MPMs is 'event', 'worker' and finally 'prefork'. 2. Added ``static`` as an option for ``--application-type`` when running ``mod_wsgi-express``. When set as ``static``, only static files will be served. One can still set specific handler types for different extensions which may invoke a Python handler script, but there will be no global fallback WSGI application for any URLs that do not map to static files. In these cases a normal HTTP 404 response will be returned instead. 3. Added ``--host-access-script`` option to ``mod_wsgi-express`` to allow a Python script to be provided which can control host access. This uses the ``WSGIAccessScript`` directive and the handler script should define an ``allow_access(environ, host)`` function which returns ``True`` if access is allowed or ``False`` if blocked. 4. Added ``--debugger-startup`` option to be used in conjunction with the ``--enable-debugger`` option of ``mod_wsgi-express`` when in debug mode. The option will cause the debugger to be activated on server start before any requests are handled to allow breakpoints to be set. 5. Added a ``socket-user`` option to ``WSGIDaemonProcess`` to allow the owner of the UNIX listener socket for the daemon process group to be overridden. This can be used when using mod_ruid2 to change the owner of the socket from the default Apache user, to the user under which mod_ruid2 will run Apache when handling requests. This is necessary otherwise the Apache child worker process will not be able to connect to the listener socket for the mod_wsgi daemon process to proxy the request to the WSGI application. 6. Added a ``--enable-recorder`` option for enabling request recording when also using debug mode. 
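As an illustration of the host access script support added above (a minimal sketch only; the file name and the blocked address are assumptions), the script supplied via ``--host-access-script`` simply needs to define the ``allow_access()`` function described::

    # access.py -- hypothetical host access control script.

    def allow_access(environ, host):
        # Return True to allow the request, False to block it.
        return host != '192.168.1.100'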
mod_wsgi-5.0.0/docs/release-notes/version-4.4.10.rst000066400000000000000000000101751452636074700220050ustar00rootroot00000000000000============== Version 4.4.10 ============== Version 4.4.10 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.10 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. Fixed a reference counting bug which would cause a daemon process to crash if both ``home`` and ``python-path`` options were specified at the same time with the ``WSGIDaemonProcess`` directive. 2. When using the ``--https-only`` option with ``mod_wsgi-express``, the redirection from the ``http`` address to the ``https`` address was not setting the correct port for ``https``. Features Changed ---------------- 1. Changed the default Apache log level for ``mod_wsgi-express`` to ``warn`` instead of ``info``. This has been done to avoid very noisy logs when enabling secure HTTP connections. To set back to ``info`` level use the ``--log-level`` option. 2. When specifying a service script with the ``--service-script`` option of ``mod_wsgi-express``, the home directory for the process will now be set to the same home directory as used for the hosted WSGI application. Python modules from the WSGI application will therefore be automatically found when imported. Any directory paths added using the ``--python-path`` option will also be added as search directories for Python module imports, with any ``.pth`` files in those directories also being handled. In addition, the language locale and Python eggs directory used by the hosted WSGI application will also be used for the service script. 3. When specifying the ``--python-path`` option, the paths set up for the WSGI application will now be added in such a way that they appear at the head of ``sys.path``, and any ``.pth`` files in those directories are also handled. New Features ------------ 1. Added the ``--directory-listing`` option to ``mod_wsgi-express`` to allow automatic directory listings to be enabled when using the static file application type and no explicit directory index file has been specified. 2. In addition to the convenience function of ``--ssl-certificate`` for ``mod_wsgi-express``, which allowed the SSL certificate and private key file to be specified using one option by specifying the common file name up to the extension, separate ``--ssl-certificate-file`` and ``--ssl-certificate-key-file`` options are now also provided. These would either both need to be specified, or the existing ``--ssl-certificate`` option used, when specifying that secure HTTPS connections should be used through having specified ``--https-port``. 3. Added the ``--ssl-ca-certificate-file`` option to ``mod_wsgi-express``. If specified this should give the location of the file with any CA certificates to be used for client authentication. As soon as this option is provided, client authentication will be required for the whole site. This would generally be used in conjunction with the ``--https-only`` option so that only a secure communication channel is being used. If you do not wish for the whole site to require client authentication, you can use the ``--ssl-verify-client`` option to specify sub URLs for which client authentication should be performed. 4. Added the ``--ssl-environment`` option to ``mod_wsgi-express`` to enable the passing of standard SSL variables in the WSGI environ dictionary passed to the WSGI application. 5.
Added the ``WSGITrustedProxies`` directive and corresponding option of ``--trust-proxy`` to ``mod_wsgi-express``. This works in conjunction with the ``WSGITrustedProxyHeaders`` directive and ``--trust-proxy-header`` option of ``mod_wsgi-express``. When trusted proxies are specified, then proxy headers will only be trusted if the request originated with a trusted proxy. Further, any IP addresses corresponding to a proxy listed in the ``X-Forwarded-For`` header will only be trusted if specified. When determining the value for ``REMOTE_ADDR`` the IP preceding the last recognised proxy the request passed through will be used and not simply the first IP listed in the header. The header will be rewritten to reflect what was honoured with client IPs of dubious origin discarded. mod_wsgi-5.0.0/docs/release-notes/version-4.4.11.rst000066400000000000000000000051171452636074700220060ustar00rootroot00000000000000============== Version 4.4.11 ============== Version 4.4.11 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.11 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. No provision was made for operating systems with a very low limit on the number of separate data blocks that could be passed to system ``writev()`` call. This was an issue on Solaris where the limit is 16 and meant that since version 4.4.0, daemon mode of mod_wsgi would fail where a HTTP request had more than a small number of headers. 2. When installing the ``mod_wsgi`` package using ``pip`` and rather than activating the virtual environment you were referring to ``pip`` by path from the ``bin`` directory, the ``mod_wsgi-httpd`` package which had already been installed into the virtual environment would not be detected. New Features ------------ 1. Added the ``--service-log`` option to ``mod_wsgi-express`` for specifying the name of a log file for a specific service script. The arguments are the name of the service and the file name for the log. The log file will be placed in the log directory, be it the default, or a specific log directory if specified. 2. Set various environment variables from ``mod_wsgi-express`` to identify that it is being used, what hosts it is handling requests for, and whether debug mode and/or specific debug mode features are enabled. This is so that a web application can modify it's behaviour when ``mod_wsgi-express`` is being used, or being used in specific ways. The environment variables which are set are: * *MOD_WSGI_EXPRESS* - Indicates that ``mod_wsgi-express`` is being used. * *MOD_WSGI_SERVER_NAME* - The primary server host name for the site. * *MOD_WSGI_SERVER_ALIASES* - Secondary host names the site is known by. * *MOD_WSGI_RELOADER_ENABLED* - Indicates if source code reloading enabled. * *MOD_WSGI_DEBUG_MODE* - Indicates if debug mode has been enabled. * *MOD_WSGI_DEBUGGER_ENABLED* - Indicates pdb debugger has been enabled. * *MOD_WSGI_COVERAGE_ENABLED* - Indicates if coverage analysis has been enabled. * *MOD_WSGI_PROFILER_ENABLED* - Indicates if code profiling has been enabled. * *MOD_WSGI_RECORDER_ENABLED* - Indicates if request/response recording enabled. * *MOD_WSGI_GDB_ENABLED* - Indicates if gdb process crash debugging enabled. For any environment variable indicating a feature has been enabled, it will be set when enabled and have the value 'true'. For the list of server aliases, it will be a space separated list of host names. 
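As a rough sketch of how a hosted WSGI application might act on the environment variables listed above (the specific settings being toggled are only an example)::

    import os

    # 'true' is the value used for any feature flag which is enabled.
    DEBUG_MODE = os.environ.get('MOD_WSGI_DEBUG_MODE') == 'true'

    # Space separated list of any secondary host names for the site.
    SERVER_ALIASES = os.environ.get('MOD_WSGI_SERVER_ALIASES', '').split()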
mod_wsgi-5.0.0/docs/release-notes/version-4.4.12.rst000066400000000000000000000013461452636074700220070ustar00rootroot00000000000000============== Version 4.4.12 ============== Version 4.4.12 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.12 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. If the WSGI application when run under daemon mode returned response content as many small blocks, this could result in excessive memory usage in the Apache child worker process proxying the request due to many buckets being buffered until the buffer size threshold was reached. If the number of buckets reaches a builtin threshold the buffered data will now be forcibly flushed even if the size threshold hadn't been reached. mod_wsgi-5.0.0/docs/release-notes/version-4.4.13.rst000066400000000000000000000041701452636074700220060ustar00rootroot00000000000000============== Version 4.4.13 ============== Version 4.4.13 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.13 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. The pip installable 'mod_wsgi' package was failing to install on OpenShift and Heroku as ``mod_wsgi-apxs`` isn't used for tarball based installs. Features Changed ---------------- 1. For ``mod_wsgi-express``, only the web server type is now shown in the server tokens sent back in the ``Server`` response header. This prevents users from knowing any specifics and thus using that to determine possible vulnerabilities. New Features ------------ 1. Set environment variables from ``apachectl`` for ``mod_wsgi-express`` about the server environment which can be used in additional Apache configuration included into the generated configuration. The environment variables are: * *MOD_WSGI_SERVER_ROOT* - This is the directory where the generated configuration files, startup scripts, etc were placed. * *MOD_WSGI_WORKING_DIRECTORY* - This is the directory which will be used as the current working directory of the process. Would default to being the same as ``MOD_WSGI_SERVER_ROOT`` if not overridden. * *MOD_WSGI_LISTENER_HOST* - The host name or IP on which connections are being accepted. This should only be used if the Apache configuration variable ``MOD_WSGI_WITH_LISTENER_HOST`` is defined. * *MOD_WSGI_HTTP_PORT* - The port on which HTTP connections are being accepted. * *MOD_WSGI_HTTPS_PORT* - The port on which HTTPS connections are being accepted. This should only be used if the Apache configuration variable ``MOD_WSGI_WITH_HTTPS`` is defined. * *MOD_WSGI_MODULES_DIRECTORY* - The directory where the Apache modules are installed. * *MOD_WSGI_RUN_USER* - The user that the WSGI application will be run as. * *MOD_WSGI_RUN_GROUP* - The group that the WSGI application will be run as. 2. Added ``X-Client-IP`` to list of possible trusted headers indicating the true remote address of client when passing through a proxy. 
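As a sketch of how the environment variables described above might be used from additional Apache configuration included into the generated configuration (the include file name, URL and directory are assumptions), Apache's ``${VARNAME}`` syntax can pick up the values exported by ``apachectl``::

    # extra.conf -- hypothetical configuration pulled into the generated setup.
    Alias /downloads/ ${MOD_WSGI_WORKING_DIRECTORY}/downloads/
    <Directory "${MOD_WSGI_WORKING_DIRECTORY}/downloads">
        Require all granted
    </Directory>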
mod_wsgi-5.0.0/docs/release-notes/version-4.4.14.rst000066400000000000000000000036261452636074700220140ustar00rootroot00000000000000============== Version 4.4.14 ============== Version 4.4.14 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.14 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. The ``--compress-responses`` option of ``mod_wsgi-express`` was failing when Apache 2.4 was used. This was because ``mod_filter`` module is required when using Apache 2.4 and it wasn't being loaded. 2. On Python 3, the IO object wrapped by ``sys.stdout`` and ``sys.stderr``, according to the Python documentation, must provide a ``fileno()`` method even though no file descriptor exists corresponding to the Apache error logs. The method should raise ``IOError`` if called to indicate not file descriptor can be returned. Previously, an attempt to use ``fileno()`` on ``sys.stdout`` and ``sys.stderr`` would raise an ``AttributeError`` instead due to there being no ``fileno()`` method. 3. Use compiler include flags from running of ``apr-config`` and ``apu-config`` when doing ``pip`` install of ``mod_wsgi-express``. This is necessary as on MacOS X 10.11 El Capitan the include flags for APR returned by ``apxs`` refer to the wrong location causing installation to fail. New Features ------------ 1. When proxying a URL path or a virtual host, now setting request header for ``X-Forwarded-Port`` so back end knows correct port that front end used. 2. When proxying a URL path, if the request came in over a secure HTTP connection, now setting request header for ``X-Forwarded-Scheme`` so back end knows that front end handled the request over a secure connection. The value of the header will be ``https``. 3. When using ``mod_wsgi-express``, it is now possible to supply the ``--with-cgi`` option, with any files in the document root directory with a '.cgi' extension then being processed as traditional CGI scripts. mod_wsgi-5.0.0/docs/release-notes/version-4.4.15.rst000066400000000000000000000021401452636074700220030ustar00rootroot00000000000000============== Version 4.4.15 ============== Version 4.4.15 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.15 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. When specifying multiple directories for the Python module search path using the ``WSGIPythonPath`` directive, or the ``python-path`` option to ``WSGIDaemonProcess``, it was failing under Python 3 due to incorrect logging. It was therefore only possible to add a single directory. 2. If Apache was already running when the mod_wsgi module was enabled or otherwise configured to be loaded, and then an Apache graceful restart was done so that it would be loaded for the first time, all child processes would crash when starting up and would keep crashing, requiring Apache be shutdown. This would occur as Python initialisation was not being performed correctly in this specific case where mod_wsgi was loaded when Apache was already running and a graceful restart, rather than a normal restart was done. 
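To illustrate the first of the fixes above (directory paths here are placeholders), multiple directories are given as a colon separated list on UNIX systems, whether via the directive or the daemon process group option::

    WSGIPythonPath /opt/app/lib:/opt/app/vendor

    WSGIDaemonProcess myapp python-path=/opt/app/lib:/opt/app/vendor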
mod_wsgi-5.0.0/docs/release-notes/version-4.4.16.rst000066400000000000000000000011241452636074700220050ustar00rootroot00000000000000============== Version 4.4.16 ============== Version 4.4.16 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.16 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. If ``/dev/stderr`` cannot be opened for writing when startup log is requested and logging to the terminal, then ``mod_wsgi-express`` would fail. Now attempt fallback to using ``/dev/tty`` and if that cannot be opened either, then give up on trying to use terminal for startup log. mod_wsgi-5.0.0/docs/release-notes/version-4.4.17.rst000066400000000000000000000010631452636074700220100ustar00rootroot00000000000000============== Version 4.4.17 ============== Version 4.4.17 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.17 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. If ``mod_wsgi-express`` was run under a user ID for which there was no password entry in the system password file, it would fail when looking up the user name. If this occurs now use ``#nnn`` as the default user name, where ``nnn`` is the user ID. mod_wsgi-5.0.0/docs/release-notes/version-4.4.18.rst000066400000000000000000000010651452636074700220130ustar00rootroot00000000000000============== Version 4.4.18 ============== Version 4.4.18 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.18 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. If ``mod_wsgi-express`` was run under a user ID for which there was no password entry in the system password file, it would fail when looking up the group name. If this occurs now use ``#nnn`` as the default group name, where ``nnn`` is the user ID. mod_wsgi-5.0.0/docs/release-notes/version-4.4.19.rst000066400000000000000000000010701452636074700220100ustar00rootroot00000000000000============== Version 4.4.19 ============== Version 4.4.19 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.19 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. Daemon mode processes were crashing when attempting to set ``USER``, ``USERNAME``, ``LOGNAME`` or ``HOME`` when no password entry could be found for the current user ID. Now do not attempt to set these if the user ID doesn't have a password file entry. mod_wsgi-5.0.0/docs/release-notes/version-4.4.2.rst000066400000000000000000000035231452636074700217250ustar00rootroot00000000000000============= Version 4.4.2 ============= Version 4.4.2 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.2 Known Issues ------------ 1. Although the makefiles for building mod_wsgi on Windows have now been updated for the new source code layout, some issues are being seen with mod_wsgi on Apache 2.4. These issues are still being investigated. As most new changes in 4.X relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. 
Binaries compiled by a third party can be obtained from: * http://www.lfd.uci.edu/~gohlke/pythonlibs/#mod_wsgi Features Changed ---------------- 1. The ``--ssl-port`` option has been deprecated in favour of the option ``--https-port``. Strictly speaking SSL no longer exists and has been supplanted by TLS. The 'S' in 'HTTPS' is actually meant to mean secure and not 'SSL'. So the name of the option was changed to properly match terminology. 2. The name of the startup log was changed such that naming was consistent with how logs are normally named with Apache. That is ``startup_log`` instead of ``startup.log``, thereby matching convention with ``error_log`` and ``access_log``. Bugs Fixed ---------- 1. When a default language was specified using the ``locale`` option to the ``WSGIDaemonProcess`` directive or the ``--locale`` option to ``mod_wsgi-express``, if it did not actually match a locale supported by the operating system, that the locale couldn't be set wasn't logged. Such a message is now logged along with a suggestion to use ``C.UTF-8`` as a fallback locale if the intent is to have ``UTF-8`` support. 2. When using the ``--https-only`` option with ``mod_wsgi-express``, a HTTP request was not being redirected to be a HTTPS request when there were no server aliases specified. mod_wsgi-5.0.0/docs/release-notes/version-4.4.20.rst000066400000000000000000000010021452636074700217730ustar00rootroot00000000000000============== Version 4.4.20 ============== Version 4.4.20 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.20 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. The post mortem debugger would fail if the exception was raised during yielding of items from a WSGI application, or inside of any ``close()`` callable of an iterator returned from the WSGI application. mod_wsgi-5.0.0/docs/release-notes/version-4.4.21.rst000066400000000000000000000026121452636074700220040ustar00rootroot00000000000000============== Version 4.4.21 ============== Version 4.4.21 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.21 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Features Changed ---------------- 1. When any of the options ``--enable-debugger``, ``--enable-coverage``, ``--enable-profiler``, ``--enable-recorder`` or ``--enable-gdb`` are used, debug mode will now automatically be enabled. Previously you had to also supply the ``--debug-mode`` option otherwise these options wouldn't be honoured. New Features ------------ 1. Added a WSGI test application to ``mod_wsgi-express`` which returns details of the request headers, application environment and request content as the response. This can be used for testing how requests are passed through and also what the execution environment looks like. It can be used by running::

    mod_wsgi-express start-server --application-type module mod_wsgi.server.environ

2. Added the ``--entry-point`` option to ``mod_wsgi-express`` as a more explicit way of identifying the file or module name containing the WSGI application entry point or description. This is in addition to simply being able to list it without any option. The explicit way just makes it easier to see the purpose when you have a long list of options.
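As a brief illustration of the two forms (a sketch only; the script name is a placeholder), the entry point can either be listed directly or named explicitly::

    mod_wsgi-express start-server wsgi.py

    mod_wsgi-express start-server --entry-point wsgi.py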
mod_wsgi-5.0.0/docs/release-notes/version-4.4.22.rst000066400000000000000000000027531452636074700220130ustar00rootroot00000000000000============== Version 4.4.22 ============== Version 4.4.22 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.22 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. Stack traces logged at ``INFO`` level when a request timeout occurred were not displaying correctly when Python 3 was being used. It is possible that the logging code could also have caused the process to then crash as the process was shutting down. 2. When using the ``--url-alias`` option with ``mod_wsgi-express`` and the target directory had a trailing slash, that trailing slash was being incorrectly dropped. This would cause URL lookup to fail when the URL for the directory was a sub URL and also had a trailing slash. New Features ------------ 1. When using ``mod_wsgi-express``, rewrite rules can now be added into the ``rewrite.conf`` file located under the server root directory. An alternate location for the rewrite rules can be specified using the ``--rewrite-rules`` option. Note that the rewrite rules are included within a ``Directory`` block of the Apache configuration file, for the document root directory. Any rules therefore needs to be written so as to work in this context. If you need to debug the rewrite rules and are using Apache 2.4, the easiest way to enable rewrite logging is to use the ``--log-level`` option with the quoted value of ``'info rewrite:trace8'``. mod_wsgi-5.0.0/docs/release-notes/version-4.4.23.rst000066400000000000000000000011721452636074700220060ustar00rootroot00000000000000============== Version 4.4.23 ============== Version 4.4.23 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.23 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 New Features ------------ 1. Added the ``--ssl-certificate-chain-file`` option to ``mod_wsgi-express``, for specifying the path to a file containing the certificates of Certification Authorities (CA) which form the certificate chain of the server certificate. This is equivalent to having used the Apache ``SSLCertificateChainFile`` directive. mod_wsgi-5.0.0/docs/release-notes/version-4.4.3.rst000066400000000000000000000053201452636074700217230ustar00rootroot00000000000000============= Version 4.4.3 ============= Version 4.4.3 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.3 Known Issues ------------ 1. Although the makefiles for building mod_wsgi on Windows have now been updated for the new source code layout, some issues are being seen with mod_wsgi on Apache 2.4. These issues are still being investigated. As most new changes in 4.X relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Binaries compiled by a third party can be obtained from: * http://www.lfd.uci.edu/~gohlke/pythonlibs/#mod_wsgi Features Changed ---------------- 1. The ``--lang`` option to ``mod_wsgi-express`` has been deprecated. Any default language locale setting should be set exclusively using the ``--locale`` option. 2. The behaviour of the ``--locale`` option to ``mod_wsgi-express`` has changed. 
Previously if this option was not defined, then both of the locales ``en_US.UTF-8`` and ``C.UTF-8`` have at times been hardwired as the default locale. These locales are though not always present. As a consequence, a new algorithm is now used. If the ``--locale`` option is supplied, the argument will be used as the locale. If no argument is supplied, the default locale for the executing ``mod_wsgi-express`` process will be used. If that however is ``C`` or ``POSIX``, then an attempt will be made to use either the ``en_US.UTF-8`` or ``C.UTF-8`` locales and if that is not possible only then fallback to the default locale of the ``mod_wsgi-express`` process. In other words, unless you override the default language locale, an attempt is made to use an English language locale with ``UTF-8`` encoding. 3. Unless the process name is overridden using ``--process-name`` option to ``mod_wsgi-express``, the Apache parent and child worker process will be given a name such as ``httpd (mod_wsgi-express)`` making them more easily distinguishable from a traditional Apache installation. Bugs Fixed ---------- 1. The ``mod_wsgi-express`` script would fail on startup if the user had a corresponding group ID which didn't actually match an existing group in the groups file and no override group was being specified. When this occurs, the group will now be specified as ``#nnn`` where ``nnn`` is the group ID. New Features ------------ 1. Added ``--process-name`` option to ``mod_wsgi-express`` to allow the name of the Apache parent process to be overridden as it would be displayed in ``ps``. This is necessary under some process manager systems where it looks for a certain name, but with shell script wrappers and exec calls happening around ``mod_wsgi-express`` the name would change. mod_wsgi-5.0.0/docs/release-notes/version-4.4.4.rst000066400000000000000000000020361452636074700217250ustar00rootroot00000000000000============= Version 4.4.4 ============= Version 4.4.4 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.4 Known Issues ------------ 1. Although the makefiles for building mod_wsgi on Windows have now been updated for the new source code layout, some issues are being seen with mod_wsgi on Apache 2.4. These issues are still being investigated. As most new changes in 4.X relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Binaries compiled by a third party can be obtained from: * http://www.lfd.uci.edu/~gohlke/pythonlibs/#mod_wsgi New Features ------------ 1. The ``mod_wsgi-express`` command will now output to ``stdout`` the number of daemon processes and threads being used. 2. Add automatic installation of precompiled Apache binaries when deploying ``mod_wsgi-express`` to Heroku or OpenShift. These binaries will be pulled down from S3 and installed as part of the mod_wsgi package. mod_wsgi-5.0.0/docs/release-notes/version-4.4.5.rst000066400000000000000000000015711452636074700217310ustar00rootroot00000000000000============= Version 4.4.5 ============= Version 4.4.5 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.5 Known Issues ------------ 1. Although the makefiles for building mod_wsgi on Windows have now been updated for the new source code layout, some issues are being seen with mod_wsgi on Apache 2.4. These issues are still being investigated. 
As most new changes in 4.X relate to mod_wsgi daemon mode, which is not supported under Windows, you should keep using the last available binary for version 3.X on Windows instead. Binaries compiled by a third party can be obtained from: * http://www.lfd.uci.edu/~gohlke/pythonlibs/#mod_wsgi Bugs Fixed ---------- 1. When installing ``mod_wsgi-express`` from PyPi on OpenShift as a dependency of an application ``setup.py`` file, the precompiled Apache binaries would not be installed. mod_wsgi-5.0.0/docs/release-notes/version-4.4.6.rst000066400000000000000000000054571452636074700217410ustar00rootroot00000000000000============= Version 4.4.6 ============= Version 4.4.6 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.6 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. Apache 2.2.29 and 2.4.11 introduce additional fields to the request structure ``request_rec`` due to CVE-2013-5704. The addition of these fields will cause versions of mod_wsgi from 4.4.0-4.4.5 to crash when used in mod_wsgi daemon mode and mod_wsgi isn't initialising the new structure members. If you are upgrading your Apache installation to those versions or later versions, you must also update to mod_wsgi version 4.4.6. The mod_wsgi 4.4.6 source code must have also been compiled against the newer Apache version. In recompiling mod_wsgi 4.4.6 source code against the newer Apache versions the source code is able to detect the new fields exist at compile time by checking a compile time version number. One problem that can arise is that where a CVE is raised for a security issue, Linux distributions will back port the change to older Apache versions. When they do this though, the compile time version number isn't changed, so mod_wsgi cannot detect at compile time when built against Apache versions with the backport that the additional fields exist. To combat this problem, mod_wsgi will do some runtime checks which look at the actual size of ``request_rec`` and calculate whether the additional fields have been added by way of a backported change. In this case mod_wsgi will then set the fields as necessary. As a final fail safe for forward compatibility. If the current mod_wsgi source code is compiled against a version of Apache which doesn't have the CVE change applied, it will pad the ``request_rec`` and optimistically set the fields anyway. This is to deal with the situation where mod_wsgi is compiled against an older Apache and then that Apache is upgraded to one with the CVE change, but mod_wsgi is not recompiled so that the additional fields can be detected at compile time. 2. Override ``LC_ALL`` environment variable when ``locale`` option to the ``WSGIDaemonProcess`` directive. It is not always sufficient to just call ``setlocale()`` as some Python code, including interpreter initialisation can still consult the original ``LC_ALL`` environment variable. In this case this can result in an undesired file system encoding still being selected. New Features ------------ 1. Added ``--enable-gdb`` option to ``mod_wsgi-express`` for when running in debug mode. With this option set, Apache will be started up within ``gdb`` allowing the debug of process crashes on startup or while handling requests. If the ``gdb`` program is not in ``PATH``, the ``--gdb-executable`` option can be set to give its location. 
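If needing to confirm that the ``locale`` option and the ``LC_ALL`` override described above have taken effect, the effective locale and file system encoding can be written to the Apache error log from the WSGI script file at import time. This is only an illustrative snippet and not something mod_wsgi does itself::

    import locale
    import sys

    # Both values end up in the Apache error log when the WSGI script is loaded.
    sys.stderr.write('locale: %r\n' % (locale.getlocale(),))
    sys.stderr.write('filesystem encoding: %s\n' % sys.getfilesystemencoding())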
mod_wsgi-5.0.0/docs/release-notes/version-4.4.7.rst000066400000000000000000000062321452636074700217320ustar00rootroot00000000000000============= Version 4.4.7 ============= Version 4.4.7 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.7 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Features Changed ---------------- 1. The ``proxy-buffer-size`` option to ``WSGIDaemonProcess`` directive was renamed to ``response-buffer-size`` to avoid confusion with options related to normal HTTP proxying. The ``--proxy-buffer-size`` option of ``mod_wsgi-express`` was similarly renamed to ``--response-buffer-size``. New Features ------------ 1. Added ``--service-script`` option to ``mod_wsgi-express`` to allow a Python script to be loaded and executed in the context of a distinct daemon process. This can be used for executing a service to be managed by Apache, even though it is a distinct application. The options take two arguments, a short name for the service and the path to the Python script for starting the service. If ``mod_wsgi-express`` is being run as root, then a user and group can be specified for the service using the ``--service-user`` and ``--service-group`` options. The options take two arguments, a short name for the service and the user or group name respectively. 2. Added ``--proxy-url-alias`` option to ``mod_wsgi-express`` for setting up proxying of a sub URL of the site to a remote URL. 3. Added ``--proxy-virtual-host`` option to ``mod_wsgi-express`` for setting up proxying of a whole virtual host to a remote URL. Only supports proxying of HTTP requests and not HTTPS requests. 4. Added ``eviction-timeout`` option to ``WSGIDaemonProcess`` directive. For the case where the graceful restart signal, usually ``SIGUSR1``, is sent to a daemon process to evict the WSGI application and restart the process, this controls how many seconds the process will wait, while still accepting new requests, before it reaches an idle state with no active requests and shuts down. The ``graceful-timeout`` option previously performed this exact role in this case previously, but a separate option is being added to allow a different timeout period to be specified for the case for forced eviction. The existing ``graceful-timeout`` option is still used when a maximum requests option or CPU usage limit is set. For backwards compatibility, if ``eviction-timeout`` isn't set, it will fall back to using any value specified using the ``graceful-timeout`` option. The ``--eviction-timeout`` option has also been added to ``mod_wsgi-express`` and behaves in a similar fashion. 5. Added support for new ``mod_wsgi-httpd`` package. The ``mod_wsgi-httpd`` package is a pip installable package which will build the Apache httpd server and install it into the Python installation. If the ``mod_wsgi-httpd`` package is installed before installing this package, then the Apache httpd server installation installed by ``mod_wsgi-httpd`` will be used instead of any system installed version of the Apache httpd server when running ``mod_wsgi-express``. This allows you to workaround any inability to upgrade the main Apache installation, or install its 'dev' package if missing, or install it outright if not present. 
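As a rough sketch only, a Python script supplied to the ``--service-script`` option might look like the following, where the service name, the file name and the work performed are all hypothetical::

    # service.py -- started with: --service-script housekeeping service.py
    import time

    def run():
        while True:
            # Perform whatever periodic housekeeping task the service exists for.
            time.sleep(60)

    run()

The script simply runs for the life of the daemon process created to manage it.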
mod_wsgi-5.0.0/docs/release-notes/version-4.4.8.rst000066400000000000000000000021711452636074700217310ustar00rootroot00000000000000============= Version 4.4.8 ============= Version 4.4.8 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.8 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Bugs Fixed ---------- 1. The eviction timeout was not being correctly applied when request timeout wasn't being applied at the same time. It may have partly worked if any of inactivity or graceful timeout were also specified, but the application of the timeout may still have been delayed. New Features ------------ 1. Added the ``--error-log-name`` option to ``mod_wsgi-express`` to allow the name of the file used for the error log, when being written to the log directory, to be overridden. 2. Added the ``--access-log-name`` option to ``mod_wsgi-express`` to allow the name of the file used for the access log, when being written to the log directory, to be overridden. 3. Added the ``--startup-log-name`` option to ``mod_wsgi-express`` to allow the name of the file used for the startup log, when being written to the log directory, to be overridden. mod_wsgi-5.0.0/docs/release-notes/version-4.4.9.rst000066400000000000000000000124471452636074700217410ustar00rootroot00000000000000============= Version 4.4.9 ============= Version 4.4.9 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.4.9 For details on the availability of Windows binaries see: https://github.com/GrahamDumpleton/mod_wsgi/tree/master/win32 Features Changed ---------------- 1. The ``--proxy-url-alias`` option of ``mod_wsgi-express`` has been superseded by the ``--proxy-mount-point`` option. This option now should only be used to proxy to a whole site or sub site and not individual file resources. If the mount point URL for what should be proxied doesn't have a trailing slash, the trailing slash redirection will first be performed on the proxy for the mount point rather than simply passing it through to the backend. 2. The signal handler intercept will now be removed automatically from a Python child process forked from either an Apache child process or a daemon process. This avoids the requirement of setting ``WSGIRestrictSignal`` to ``Off`` if wanting to setup new signal handlers from a forked child process. 3. The signal handler registrations setup in daemon processes to manage process shutdown, will now revert to exiting the process when invoked from a Python process forked from a daemon process. This avoids the need to set new signal handlers in forked processes to override what was inherited. Note that this only applies to processes forked from daemon mode processes. If you are forking processes when your WSGI application is running in embedded mode, it is still a good idea to set signal handles for ``SIGINT``, ``SIGTERM`` and ``SIGUSR1`` back to ``SIG_DFL`` using ``signal.signal()`` if you want to avoid the possibility of strange behaviour due to the inherited Apache child worker process signal registrations. New Features ------------ 1. Added ``--hsts-policy`` option to ``mod_wsgi-express`` to allow a HSTS (``Strict-Transport-Security``) policy response header to be specified which should be included when the ``--https-only`` option is used to ensure that the site only accepts HTTPS connections. 2. Added ``WSGITrustedProxyHeaders`` directive. 
This allows you to specify a space separated list of inbound HTTP headers used to transfer client connection information from a proxy to a backend server, that are trusted. When the specified headers are seen in a request, the values passed via them will be used to fix up the values in the WSGI ``environ`` dictionary to reflect client information as was seen by the proxy. Only the specific headers you are expecting and which is guaranteed to have only been set by the proxy should be listed. Whether it exists or not, all other headers in a category will be removed so as to avoid an issue with a forged header getting through to a WSGI middleware which is looking for a different header and subsequently overriding whatever the trusted header specified. This applies to the following as well when more than one convention is used for the header name. The header names which are accepted for specifying the HTTP scheme used are ``X-Forwarded-Proto``, ``X-Forwarded-Scheme`` and ``X-Scheme``. It is expected that the value these supply will be ``http`` or ``https``. When it is ``https``, the ``wsgi.url_scheme`` value in the WSGI ``environ`` dictionary will be overridden to be ``https``. Alternate headers accepted are ``X-Forwarded-HTTPS``, ``X-Forwarded-SSL`` and ``X-HTTPS``. If these are passed, the value needs to be ``On``, ``true`` or ``1``. A case insensitive match is performed. When matched, the ``wsgi.url_scheme`` value in the WSGI ``environ`` dictionary will be overridden to be ``https``. The header names which are accepted for specifying the target host are ``X-Forwarded-Host`` and ``X-Host``. When found, the value will be used to override the ``HTTP_HOST`` value in the WSGI ``environ`` dictionary. The sole header name accepted for specifying the front end proxy server name is ``X-Forwarded-Server``. When found, the value will be used to override the ``SERVER_NAME`` value in the WSGI ``environ`` dictionary. The sole header name accepted for specifying the front end proxy server port is ``X-Forwarded-Port``. When found, the value will be used to override the ``SERVER_PORT`` value in the WSGI ``environ`` dictionary. The header names accepted for specifying the client IP address are ``X-Forwarded-For`` and ``X-Real-IP``. When ``X-Forwarded-For`` is used then the first IP address listed in the header value will be used. For ``X-Real-IP`` only one IP address should be given. When found, the value will be used to override the ``REMOTE_ADDR`` value in the WSGI ``environ`` dictionary. Note that at present there is no facility for specifying a list of trusted IP addresses to be specified for front end proxies. This will be a feature added in a future version. When that is available and ``X-Forwarded-For`` is used, then the IP address preceding the furthest away trusted proxy IP address will instead be used, even if not the first in the list. The header names accepted for specifying the application mount point are ``X-Script-Name`` and ``X-Forwarded-Script-Name``. When found, the value will override the ``SCRIPT_NAME`` value in the ``WSGI`` environ dictionary. When using ``mod_wsgi-express`` the equivalent command line option is ``--trust-proxy-header``. The option can be used multiple times to specify more than one header. 
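The effect of trusting a header is only visible in the values the WSGI application sees in the per request ``environ`` dictionary. A minimal sketch of an application which logs the values the header fixups can adjust might be::

    def application(environ, start_response):
        # These are the values the trusted proxy headers can override.
        log = environ['wsgi.errors']
        for key in ('wsgi.url_scheme', 'HTTP_HOST', 'SERVER_NAME',
                    'SERVER_PORT', 'REMOTE_ADDR', 'SCRIPT_NAME'):
            log.write('%s: %s\n' % (key, environ.get(key)))

        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'OK']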
mod_wsgi-5.0.0/docs/release-notes/version-4.5.0.rst000066400000000000000000000010071452636074700217170ustar00rootroot00000000000000============= Version 4.5.0 ============= Version 4.5.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.0 New Features ------------ 1. Added additional internal performance monitoring features, including a per request event mechanism for getting extended metrics on a per request basis. This includes details like per request CPU burn, which along with process level CPU burn and thread utilisation can be used to better tune processes/threads settings. mod_wsgi-5.0.0/docs/release-notes/version-4.5.1.rst000066400000000000000000000004431452636074700217230ustar00rootroot00000000000000============= Version 4.5.1 ============= Version 4.5.1 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.1 Bugs Fixed ---------- 1. The CPU user and system time for requests wasn't always being output in request finished event data. mod_wsgi-5.0.0/docs/release-notes/version-4.5.10.rst000066400000000000000000000033541452636074700220070ustar00rootroot00000000000000============== Version 4.5.10 ============== Version 4.5.10 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.10 Bugs Fixed ---------- * In version 4.5.9, the version number 4.5.8 was being incorrectly reported via ``mod_wsgi.version`` in the per request WSGI environ dictionary. * When using Anaconda Python on MacOS X, the Python shared library wasn't being resolved correctly due to changes in Anaconda Python, meaning it cannot be used in embedded systems which load Python via a dynamically loaded module, such as in Apache. When using ``mod_wsgi-express`` the Python shared library is now forcibly loaded before the mod_wsgi module is loaded in Apache. If doing manual Apache configuration, you will need to add, before the ``LoadModule`` line for ``wsgi_module``, a ``LoadFile`` directive which loads the Anaconda Python shared library by its full path from where it is located in the Anaconda Python ``lib`` directory. * Startup timeout wasn't being cancelled after successful load of the WSGI script file and instead was only being done after the first request had finished. This meant that if the first request took longer than the startup timeout the process would be wrongly restarted. * Fix parsing of ``Content-Length`` header returned in daemon mode so that responses greater than 2GB in size could be returned. * Using incorrect header files in workaround to be able to compile mod_wsgi on MacOSX Sierra when using ``pip install``. Was using the old MacOS X 10.6 SDK which has header files for Apache 2.2. Was running, but should not have worked at all. Possibility this still may not work or might break. No choice until Apple fixes their broken Xcode and Apache installation. mod_wsgi-5.0.0/docs/release-notes/version-4.5.11.rst000066400000000000000000000004731452636074700220070ustar00rootroot00000000000000============== Version 4.5.11 ============== Version 4.5.11 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.11 Bugs Fixed ---------- * The ``runmodwsgi`` option when using Django application integration would fail on older Django versions up to Django 1.7.
mod_wsgi-5.0.0/docs/release-notes/version-4.5.12.rst000066400000000000000000000030501452636074700220020ustar00rootroot00000000000000============== Version 4.5.12 ============== Version 4.5.12 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.12 Bugs Fixed ---------- * When the ``pip install`` method is used to compile the module for Windows, the ``mod_wsgi-express module-config`` command was generating the wrong DLL path for ``LoadFile`` directive for Python 3.4, as well as possibly older Python versions. New Features ------------ * When using ``pip install`` on Windows, in addition to looking in the directory ``C:\Apache24`` for an Apache installation, it will now also check ``C:\Apache22`` and ``C:\Apache2``. It is recommended though that you use Apache 2.4. If your Apache installation is elsewhere, you can still set the ``MOD_WSGI_APACHE_ROOTDIR`` environment variable to its location. The environment variable should be set in your shell before running ``pip install mod_wsgi`` and should be set in a way that exports it to child processes run from the shell. * Added ``restart-interval`` option to ``WSGIDaemonProcess`` for restarting daemon mode processes after a set time. If ``graceful-timeout`` option is also specified, active requests will be given a chance to complete, while still accepting new requests. If within the grace period the process becomes idle, a shutdown will occur immediately. In the case of no grace period being specified, or the grace period expiring, the normal shutdown sequence will occur. The option is also available in ``mod_wsgi-express`` as ``--restart-interval``. mod_wsgi-5.0.0/docs/release-notes/version-4.5.13.rst000066400000000000000000000037771452636074700220230ustar00rootroot00000000000000============== Version 4.5.13 ============== Version 4.5.13 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.13 New Features ------------ * Added ``response-socket-timeout`` option to ``WSGIDaemonProcess`` directive to allow the timeout on writes back to HTTP client from Apache child worker process, when proxying responses from a mod_wsgi daemon process, to be separately overridden. Previously this would use the value of the Apache ``Timeout`` directive. With this change the timeout will be based on ``response-socket-timeout`` option. If that is not set it will use the the general ``socket-timeout`` option and if that isn't set only then will the value of the Apache ``Timeout`` directive be used. The overall purpose of being able to separately control this option is to combat against HTTP clients that never read the response, causing the response buffer when proxying to fill up, which in turn can cause the request thread in the daemon process to block. The default high value of the Apache ``Timeout`` directive, at 300 seconds meant it could take a while to clear, and if the mod_wsgi daemon processes were configured with a low total number of request threads, the whole WSGI application could block if this occurred for many requests at the same time. When using ``mod_wsgi-express`` the option can be set using the command line ``--response-socket-timeout`` option. 
If using ``mod_wsgi-express`` the default socket timeout is 60 seconds so the issue would not have had as big an impact, especially since ``mod_wsgi-express`` also defines a default request timeout of 60 seconds, which would have resulted in the daemon process being restarted if the request had blocked in returning the response. An additional error message is also now logged to indicate that failure to proxy the response content was due to a socket timeout. This will help to identify where problems are due to a blocked connection or slow client. mod_wsgi-5.0.0/docs/release-notes/version-4.5.14.rst000066400000000000000000000017171452636074700220140ustar00rootroot00000000000000============== Version 4.5.14 ============== Version 4.5.14 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.14 Bugs Fixed ---------- * Using the ``--url-alias`` option to the ``runmodwsgi`` management command when integrating ``mod_wsgi-express`` with Django could fail with Python 3. This is because the type of the items passed in an option list could be tuple or list depending on the Python version. It was necessary to add items with the same type else sorting would break. New Features ------------ * Added a ``name`` attribute to the log object used in place of ``sys.stdout`` and ``sys.stderr``, and which is also used for ``wsgi.errors`` in the per request ``environ`` dictionary. This is because although the ``name`` attribute is not required to exist, one can find code out there that assumes it always does exist for file like objects. Adding the attribute ensures that such code doesn't fail. mod_wsgi-5.0.0/docs/release-notes/version-4.5.15.rst000066400000000000000000000006001452636074700220030ustar00rootroot00000000000000============== Version 4.5.15 ============== Version 4.5.15 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.15 Bugs Fixed ---------- * Incorrect version for mod_wsgi was being reported in the server token. * On 32 bit platforms, when reading from request content, all input would be returned and the chunk size would be ignored. mod_wsgi-5.0.0/docs/release-notes/version-4.5.16.rst000066400000000000000000000031021452636074700220060ustar00rootroot00000000000000============== Version 4.5.16 ============== Version 4.5.16 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.16 Bugs Fixed ---------- * The ``WSGIDontWriteBytecode`` option wasn't available when using Python 3.3 and later. This feature of Python wasn't in initial Python 3 versions, but when it was later added, mod_wsgi was updated to allow it. * The feature behind the ``startup-timeout`` option of ``WSGIDaemonProcess`` was broken by a prior fix related to a feature in 4.5.10. This meant the option was not resulting in daemon processes being restarted when the WSGI script file could not be loaded successfully by the specified timeout. * When using ``WSGIImportScript``, or ``WSGIScriptAlias`` with both the ``process-group`` and ``application-group`` options, with the intent of preloading a WSGI script file, the ability to reach across to a daemon process defined in a different virtual host with the same ``ServerName`` was always failing and the target daemon process group would be flagged as not accessible when instead it should have been.
New Features ------------ * Added ``--allow-override`` option to ``mod_wsgi-express`` to allow use of a ``.htaccess`` in document root directory and any directories mapped using a URL alias. The argument to the directive should be the directive type which can be overridden in the ``.htaccess`` file. The option can be used more than once if needing to allow overriding of more than one directive type. Argument can be anything allowed by ``AllowOverride`` directive. mod_wsgi-5.0.0/docs/release-notes/version-4.5.17.rst000066400000000000000000000004631452636074700220140ustar00rootroot00000000000000============== Version 4.5.17 ============== Version 4.5.17 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.17 Bugs Fixed ---------- * Addition in ``mod_wsgi-express`` of ``--allow-override`` option in 4.5.16 caused ``--url-alias`` option to break. mod_wsgi-5.0.0/docs/release-notes/version-4.5.18.rst000066400000000000000000000010531452636074700220110ustar00rootroot00000000000000============== Version 4.5.18 ============== Version 4.5.18 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.18 Features Changed ---------------- * When using ``--url-alias`` with ``mod_wsgi-express`` and the target of the URL doesn't exist, it will now be assumed that it will be a directory rather than a file, when finally created. This is to accomodate where may have used ``--setup-only`` option or ``setup-server`` command to pre-generate config files before the directory is created. mod_wsgi-5.0.0/docs/release-notes/version-4.5.19.rst000066400000000000000000000010651452636074700220150ustar00rootroot00000000000000============== Version 4.5.19 ============== Version 4.5.19 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.19 Features Changed ---------------- * When using the Django management command integration of ``mod_wsgi-express``, allow the ``--working-directory`` option to override the calculated directory. This is necessary to cope with where the meaning of ``BASE_DIR`` in the Django settings file has been changed from the accepted convention of it being the parent directory of the Django project. mod_wsgi-5.0.0/docs/release-notes/version-4.5.2.rst000066400000000000000000000006131452636074700217230ustar00rootroot00000000000000============= Version 4.5.2 ============= Version 4.5.2 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.2 Bugs Fixed ---------- 1. When using ``--debug-mode`` with ``mod_wsgi-express`` any additional directories to search for Python modules, which were supplied by the ``--python-path`` option, were not being added to ``sys.path``. mod_wsgi-5.0.0/docs/release-notes/version-4.5.20.rst000066400000000000000000000004361452636074700220060ustar00rootroot00000000000000============== Version 4.5.20 ============== Version 4.5.20 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.20 Bugs Fixed ---------- * Installation on MacOS X using ``setup.py`` or ``pip`` would fail if Xcode 9.0 was installed. 
mod_wsgi-5.0.0/docs/release-notes/version-4.5.21.rst000066400000000000000000000037251452636074700220130ustar00rootroot00000000000000============== Version 4.5.21 ============== Version 4.5.21 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.21 Bugs Fixed ---------- * Installation using ``pip`` or ``setup.py`` install was failing on MacOS X High Sierra with latest Xcode as Apple doesn't even include the ``apxs`` program at all. Note you still cannot use the configure/make/make install method of deploying mod_wsgi to MacOS X. You need to use the ``pip install`` method. * Speculated that crashes on daemon process shutdown were being caused by a race condition around accessing Python C API when interpreter was being destroyed. There was a check in place to avoid this but may not have been robust enough depending on how memory cache worked for threads running across multi core machine. Now use a dedicated thread mutex to avoid race condition between main process thread and Python interpreter deadlock detection thread. Features Changed ---------------- * Set ``wsgi.input_terminated`` to ``True`` in WSGI environment. This is a unofficial extension to WSGI specification proposed by Armin Ronacher for a WSGI server/middleware to flag that it is safe to read to the end of input and that ``CONTENT_LENGTH`` can be ignored. This is to be able to support chunked request content, but also anything which mutates the request content length but which can't easily change the ``CONTENT_LENGTH``, such as occurs when request content is compressed and is decompressed by the Apache web server. The ability to safely read until end of input was always present in mod_wsgi, but there was no way in the WSGI specification for a WSGI server to tell a WSGI application this was the case. Prior attempts to include something to deal with this in the WSGI specification when it was updated in PEP 3333 were ignored. This is why now an unofficial way of doing it is being adopted by WSGI servers separate to the WSGI specification. mod_wsgi-5.0.0/docs/release-notes/version-4.5.22.rst000066400000000000000000000004511452636074700220050ustar00rootroot00000000000000============== Version 4.5.22 ============== Version 4.5.22 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.22 Bugs Fixed ---------- * Change in version 4.5.21 caused Windows builds to break with undefined symbol ``wsgi_daemon_shutdown``. mod_wsgi-5.0.0/docs/release-notes/version-4.5.23.rst000066400000000000000000000005531452636074700220110ustar00rootroot00000000000000============== Version 4.5.23 ============== Version 4.5.23 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.23 Bugs Fixed ---------- * Incorrect check around whether ``apxs`` was present on system would result in ``pip`` install failing on Windows, and possibly also when using latest Xcode on MacOS X. mod_wsgi-5.0.0/docs/release-notes/version-4.5.24.rst000066400000000000000000000013111452636074700220030ustar00rootroot00000000000000============== Version 4.5.24 ============== Version 4.5.24 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.24 Bugs Fixed ---------- * Using mod_wsgi in daemon mode on Solaris would cause a process hang or max out CPU usage. Caused by change of variable type to unsigned to get rid of compiler warnings, without fixing how condition check using variable was done. 
Problem could also affect non Solaris systems if total number of HTTP headers and other variables passed in WSGI environ was greater than 1024. Affected Solaris all the time due to it having a limit of only 16 in operating system for same code, meaning hit problem immediately. mod_wsgi-5.0.0/docs/release-notes/version-4.5.3.rst000066400000000000000000000004041452636074700217220ustar00rootroot00000000000000============= Version 4.5.3 ============= Version 4.5.3 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.3 Bugs Fixed ---------- 1. Ensure that startup messages are flushed so immediately visible in logs. mod_wsgi-5.0.0/docs/release-notes/version-4.5.4.rst000066400000000000000000000033151452636074700217270ustar00rootroot00000000000000============= Version 4.5.4 ============= Version 4.5.4 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.4 Bugs Fixed ---------- 1. When using Apache 2.4 and daemon mode, the connection and request log IDs from the Apache child worker processes were not being copied across to the daemon process so that log messages generated against the request would use the same ID in logs when using the ``%L`` format modifier. 2. When using Apache 2.4 and daemon mode, the remote client port information was not being cached such that log messages generated against the request would use the port in logs when using the ``%a`` format modifier. Features Changed ---------------- 1. If ``sys.stdout`` and ``sys.stderr`` are used in the context of the thread handling a request, calls against them to log messages will be routed back via ``wsgi.errors`` from the per request WSGI ``environ`` dictionary. This avoids the danger of logged messages from different request handlers being intermixed as buffering will now be done on a per request basis. Such messages will also be logged with the correct connection and request log ID if the ``%L`` formatter is used in the error log format. New Features ------------ 1. Added new option ``--error-log-format`` to ``mod_wsgi-express`` to allow the error log message format to be specified. 2. Pass through to the WSGI per request ``environ`` dictionary new values for ``mod_wsgi.connection_id`` and ``mod_wsgi.request_id``. These are the Apache log IDs for the connection and request that it uses in log messages when using the ``%L`` format modifier. This only applies to Apache 2.4 and later. mod_wsgi-5.0.0/docs/release-notes/version-4.5.5.rst000066400000000000000000000010451452636074700217260ustar00rootroot00000000000000============= Version 4.5.5 ============= Version 4.5.5 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.5 Features Changed ---------------- 1. Reverted the change in 4.5.4 which associated any messages logged via ``sys.stdout`` and ``sys.stderr`` back to the request so that Apache could log them with the correct request log ID. This was necessary as the change was causing process crashes under Python 3. The feature will be reinstated when a solution to the issue can be found. mod_wsgi-5.0.0/docs/release-notes/version-4.5.6.rst000066400000000000000000000056661452636074700217440ustar00rootroot00000000000000============= Version 4.5.6 ============= Version 4.5.6 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.6 Bugs Fixed ---------- 1. 
Reinstated change to associate any messages logged via ``sys.stdout`` and ``sys.stderr`` back to the request so that Apache can log them with the correct request log ID. This change was added in 4.5.4, but was reverted in 4.5.5 as the change was causing process crashes under Python 3. 2. When using Apache 2.4, use the new style ``Require`` directive instead of the older ``Order`` and ``Allow`` directives when setting up access controls for ``mod_wsgi-express``. This fixes a problem seen when using ``--include-file`` where a ``Require`` directive was being used in the included configuration. Precedence order was such that the older directives were overriding the new directive and it was possible to permit access to additional directories when using custom configuration. 3. Django 1.10 requires that management commands use argparse style options but ``mod_wsgi-express`` uses optparse style options. Can no longer simply merge the main script option list to get the management command option list. Instead need to convert the optparse list to argparse format on the fly, as still need to retain the main script option list as optparse until Python 2.6 support is dropped. Changes stop the ``runmodwsgi`` management command failing when using Django 1.10+. New Features ------------ 1. Added ``startup-timeout`` option to ``WSGIDaemonProcess`` directive. If set and the first loading of the WSGI application script file fails, then if no subsequent attempt to load it succeeds within the specified startup timeout, the daemon process will be restarted. When configuring mod_wsgi directly, the option is not enabled by default. The option is exposed via ``mod_wsgi-express`` with a default value of 15 seconds. This would be used where running the Django web framework and there is a risk of the database not being available, causing Django initialisation to fail. Django doesn't allow initialisation to be performed a second time in the same process, meaning it will then constantly fail. Use of the startup timeout will allow the process to be restarted in the face of such constant startup failures. If the database is available when the process is restarted, then next time the process starts, everything should be fine. Do note that this option should preferably only be used where the one WSGI application has been delegated to a WSGI daemon process. This is because if multiple WSGI applications are hosted out of the daemon process group, be they in the same application group or distinct ones, as soon as any one of them loads successfully, then the startup timeout is disabled, meaning that if a subsequent one loaded is constantly failing, then a process restart will not occur. Best practice is to delegate each WSGI application to a distinct daemon process group. mod_wsgi-5.0.0/docs/release-notes/version-4.5.7.rst000066400000000000000000000017061452636074700217340ustar00rootroot00000000000000============= Version 4.5.7 ============= Version 4.5.7 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.7 Bugs Fixed ---------- 1. Resolved problem whereby mod_wsgi would fail on startup when using Anaconda Python. This was caused by Anaconda Python changing the behaviour of the C API function ``Py_GetVersion()`` so that it can no longer be called before the Python interpreter is initialised. Now display only the Python major and minor version in the server string from time of compilation, rather than runtime. Also no longer log a warning about mismatches between compile time and runtime Python versions. This avoids the need to call ``Py_GetVersion()``. New Features ------------ 1.
Add ``--http2`` option to ``mod_wsgi-express`` for enabling support of HTTP/2. Requires the ``mod_http2`` module to be compiled into Apache httpd server for versions of Apache where that is available. mod_wsgi-5.0.0/docs/release-notes/version-4.5.8.rst000066400000000000000000000064571452636074700217450ustar00rootroot00000000000000============= Version 4.5.8 ============= Version 4.5.8 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.8 Bugs Fixed ---------- * When using HTTP/2 support and ``wsgi.file_wrapper``, the response could be truncated when ``mod_h2`` was deferring the sending of the response until after the WSGI request had been finalized. * Builds were failing on Windows. Insert appropriate ``#if`` conditional around code which shouldn't have been getting included on Windows. * When ``mod_wsgi-express`` is run as ``root`` and ``--python-eggs`` option is used, if the directory for the Python eggs didn't exist, it was created, but the ownership/group were not set to be the user and group that Apache would run the WSGI application. As a result Python eggs could not actually be unpacked into the directory. Now change the ownership/group of the directory to user/group specified when ``mod_wsgi-express`` was run. * Installation on MacOS X Sierra fails for both CMMI and ``pip install`` methods. This is because Apple removed ``apr-1-config`` and ``apu-1-config`` tools needed by ``apxs`` to install third party Apache module. A workaround has been incorporated so that installation still works when using ``pip install``, but there is no workaround for CMMI method. You will need to use ``pip install`` method and then use ``mod_wsgi-express module-config`` to get the configuration to then add into the Apache configuration so it knows how to load the mod_wsgi module. Then configure Apache so it knows about your WSGI application. * Compilation would fail on MacOS X Sierra as the API was changed for obtaining task information. This was used to get memory used by the process. New Features ------------ * Add ``WSGIIgnoreActivity`` directive. This can be set to ``On`` inside of a ``Location`` directive block for a specific URL path, and any requests against matching URLs will not trigger a reset of the inactivity timeout for a mod_wsgi daemon process. This can be used on health check URLs so that periodic requests against the health check URL do not interfere with the inactivity timeout and keep the process running, rather than allowing the process to restart due to being otherwise idle. * Added the ``--ignore-activity`` option to ``mod_wsgi-express``. It will set the ``WSGIIgnoreActivity`` directive to ``On`` for the specific URL path passed as argument to the option. Any requests against the matching URL path will not trigger a reset of the inactivity timeout for a mod_wsgi daemon process. * Added the ``--module-config`` option to ``mod_wsgi-express`` to get the Apache configuration snippet you would use to load the mod_wsgi module from the Python installation direct into Apache, rather than installing the module into the Apache modules directory. * Added experimental support for installing mod_wsgi on Windows using ``pip``. Is only tested with Apache 2.4 and Python 3.5. The Apache installation must be installed in ``C:\Apache24`` directory. Run ``pip install mod_wsgi``. The run ``mod_wsgi-express module-config`` and it will generate the required configuration to add into the Apache configuration file to load the mod_wsgi module. 
You still need to separately configure Apache for your specific WSGI application. mod_wsgi-5.0.0/docs/release-notes/version-4.5.9.rst000066400000000000000000000005401452636074700217310ustar00rootroot00000000000000============= Version 4.5.9 ============= Version 4.5.9 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.5.9 Bugs Fixed ---------- * Revert ``apachectl`` script generated by ``mod_wsgi-express`` back to using ``/bin/bash``, as ``/bin/sh`` on some Linux systems lacks the ability to do ``exec -a``. mod_wsgi-5.0.0/docs/release-notes/version-4.6.0.rst000066400000000000000000000260261452636074700217300ustar00rootroot00000000000000============= Version 4.6.0 ============= Version 4.6.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.0 Bugs Fixed ---------- * Management of reference counting on Python objects in the access, authentication, authorization and dispatch hooks wasn't correct for certain error cases. The error cases shouldn't have ever occurred, but were still fixed. * The point at which details of Python exceptions occurring during the access, authentication, authorization and dispatch hooks were logged was incorrect; the details weren't logged, and the exception cleared, before trying to close the per callback error log. That the exception hadn't been cleared would result in the call to close the per callback error log itself failing, as it believed an exception occurred in that call when it hadn't. The result was confusing error messages in the Apache error log. * The deprecated backwards compatibility mode enabled by setting the directive ``WSGILazyInitialization Off``, to have Python initialised in the Apache parent process before forking, was resulting in the Apache parent process crashing on Apache shutdown or restart. This resulted in Apache child processes and daemon processes being orphaned. The issue has been fixed, but you should never use this mode and it will be removed in a future update. The reason it shouldn't be used is due to memory leaks in Python interpreter re-initialisation in the same process and also the risks due to Python code potentially being run as root. * When stack traces were being dumped upon the request timeout expiring, the line numbers of the definition of each function in the stack trace were being displayed, instead of the actual line number within the body of the function that was executing at the time. * When stack traces were being dumped upon the request timeout expiring, the thread ID was being truncated to 32 bits when displayed, meaning it wouldn't match the actual Python thread ID on 64 bit systems. Features Changed ---------------- * Now flagging the mod_wsgi package when installing using ``setup.py`` as being not ``zip_safe``. This is to work around an apparent bug with ``setuptools`` when using Python 3.7 alpha versions. Believe this will disable use of an egg file in certain cases. * When the connection to a client is lost when writing back the response, the HTTP response code logged in the Apache access log will be that for the original response from the WSGI application rather than a 500 error. This is done to avoid confusion where a 500 error is recorded in the access log, making you think your WSGI application is at fault when it wasn't, but there is no actual error recorded in the error log as to why the 500 error was recorded in the access log.
The reason no error is logged in the case of the connection to a client being lost is that doing so would create a lot of noise due to the regularity which it can happen. The only time an error is logged is when a timeout occurs rather than connection being lost. That is done to highlight that connections are hanging due to the effect it can have on available server capacity when connections are kept open for long times. Thanks to Jesús Cea Avión for identifying how using the Apache C API it could be identified that the connection had been aborted and in that case the original HTTP response code could safely be used. * When using the Django integration for ``mod_wsgi-express``, if the ``whitenoise.middleware.WhiteNoiseMiddleware`` middleware is listed in ``MIDDLEWARE`` or ``MIDDLEWARE_CLASSES`` of the Django settings file, Apache will now not be used to host Django's static files. This is being done to allow WhiteNoise middleware to be used in conjunction with front end content delivery networks or other caching systems. If you aren't using such a front end and do want Apache to still host the static files, either don't list the WhiteNoise middleware in the list of middleware classes when using ``mod_wsgi-express``, or pass the ``--url-alias`` option explictly, along with the URL mount point for static files and the directory where they have been placed by the ``collectstatic`` management command of Django. * When running ``mod_wsgi-express`` if the ``TMPDIR`` environment variable is specified, it will be used as the directory under which the default server root directory for generated files will be created. If ``TMPDIR`` is not specified, then ``/tmp`` will be used. This allows ``TMPDIR`` to be used to control the directory used as a default. On MacOS where ``TMPDIR`` is set to a unique directory for the login session under ``/var/tmp``, this also avoids a problem where a system cron job in MacOS will delete files under ``/tmp`` which are older than a certain date, which can cause a long running instance of ``mod_wsgi-express`` to start failing. * The "process_stopping" event previously would not be delivered when the process was being shutdown and there were still active requests, such as when a request timeout occurred. Seen as better to always deliver the event if can, even if there were still requests that hadn't been completed. This will allow the event handler to dump out details on what the active requests were, helping to identify long running or stuck requests. New Features ------------ * When using ``--compress-responses`` option of ``mod_wsgi-express``, content of type ``application/json`` will now be compressed. * Added directive ``WSGISocketRotation`` to allow the rotation of the daemon socket file path on restarts of Apache to be disabled. By default it is ``On`` to preserve existing behaviour but can be set to ``Off`` to have the same socket file path always be used for lifetime of that Apache instance. Rotation should only be disabled where the Apache configuration for the mod_wsgi application stays constant over time. The rotation was originally done to prevent a request received and handled by an Apache worker process being proxied through to a daemon process created under a newer configuration. This was done to avoid the possibility of an error, or a security issue, due to the old and new configurations being incompatible or out of sync. 
By setting rotation to ``Off``, when a graceful restart is done and the Apache worker process survives for a period of time due to keep alive connections, those subsequent requests on the keep alive connection will now be proxied to the newer daemon processes rather than being failed as occurred before due to no instances of daemon process existing under the older configuration. Although socket rotation still defaults to ``On`` for mod_wsgi, this is overridden for ``mod_wsgi-express`` where it is always now set to ``Off``. This is okay as is not possible for configuration to change when using it. * The ``process-group`` and ``application-group`` options can now be used with the ``WSGIScriptAliasMatch`` directive. If substitutions are not used in the value for the WSGI script file target path, then the WSGI script file will be pre-loaded if both ``process-group`` and ``application-group`` options are used at the same time. Note that the documentation was wrongly updated recently to suggest that these options were already supported by ``WSGIScriptAliaMatch``. This was done in error. Instead of removing the documentation, the ability to use the options with the directive was instead added with this release. * Raise an actual exception when installing using ``pip`` or using the ``setup.py`` file on MacOS and it doesn't appear that Xcode application has been installed. Lack of Xcode application will mean that cannot find the SDK which has the Apache include files. * An explicit error message is now logged when the calculated daemon socket path is too long and would be truncated, causing potential failures. A shorter directory path should be set with the ``WSGISocketPrefix`` option. * Added the ``--socket-path`` option to ``mod_wsgi-express`` so you can set the daemon socket prefix via the ``WSGISocketPrefix`` directive to an alternate directory if the calculated path would be too long based on where server root is set for ``mod_wsgi-express``. * Added the ``--isatty`` option to ``mod_wsgi-express`` to indicate that running the command in an interactive terminal session. In this case Apache will be run as a sub process rather than it replacing the current script. Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be intercepted and forwarded onto Apache, but the signal SIGWINCH will be ignored. This will avoid the problems of Apache shutting down when the terminal session Apache is run in is resized. Technically this could be done automatically by working out if the attached terminal is a tty, but is being done using an option at this point so the reliability of the mechanism used to run Apache as a sub process and the handling of the signals, can be verified. If everything checks out, it is likely that this will become the default behaviour when the attached terminal is a tty. * When using ``WSGIDaemonProcess``, if you set the number of threads to zero you will enable a special mode intended for using a daemon process to run a managed task or program. You will need to use ``WSGIImportScript`` to pre-load a Python script into the main application group specified by ``%{GLOBAL}`` where the script runs a never ending task, or does an exec to run an external program. If the script or external program exits, the process is shutdown and replaced with a new one. For the case of using a Python script to run a never ending task, a ``SystemExit`` exception will be injected when a signal is received to shutdown the process. 
You can use ``signal.signal()`` to register a signal handler for ``SIGTERM`` if needing to run special actions before then exiting the process using ``sys.exit()``, or to signal your own threads to exit any processing so you can shut down in an orderly manner. The ability to do something very similar did previously exist in that you could use ``WSGIImportScript`` to run a never ending task even when the number of threads was non zero. This was used by the ``--service-script`` option of ``mod_wsgi-express``. The difference in setting ``threads=0`` is that signals will work correctly and be able to interrupt the script. Also, once the script exits, the process will shut down, to be replaced, whereas previously the process would stay running until Apache was restarted or shut down. The ``--service-script`` option of ``mod_wsgi-express`` has been updated to set the number of threads to zero. * Added the ``mod_wsgi.active_requests`` dictionary. This is populated with the per request data object for active requests, keyed by the Apache request ID. * Added the ``--cpu-time-limit`` option to ``mod_wsgi-express`` so that a limit can be imposed on how much CPU a daemon process group can use before the process is restarted automatically. * Pass a "shutdown_reason" argument with the "process_stopping" event so the event handler knows the reason the process is being shut down. mod_wsgi-5.0.0/docs/release-notes/version-4.6.1.rst000066400000000000000000000004671452636074700217300ustar00rootroot00000000000000============= Version 4.6.1 ============= Version 4.6.1 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.1 Bugs Fixed ---------- * APR version 1.4.X on RHEL/CentOS doesn't have the ``apr_hash_this_key()`` function. Swap to using ``apr_hash_this()`` instead. mod_wsgi-5.0.0/docs/release-notes/version-4.6.2.rst000066400000000000000000000004661452636074700217310ustar00rootroot00000000000000============= Version 4.6.2 ============= Version 4.6.2 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.2 Bugs Fixed ---------- * Full details of the error were not logged when a Python script file could not be loaded due to a failure when parsing Python code. mod_wsgi-5.0.0/docs/release-notes/version-4.6.3.rst000066400000000000000000000025021452636074700217240ustar00rootroot00000000000000============= Version 4.6.3 ============= Version 4.6.3 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.3 Bugs Fixed ---------- * When compiled for Python 2.6, when run mod_wsgi would fail to load into Apache due to the missing symbol ``PyFrame_GetLineNumber``. This was only introduced in Python 2.7. Use an alternate way to get the line number which still yields the correct answer. This issue was introduced in mod_wsgi version 4.6.0 in a fix to have correct line numbers generated for stack traces on shutdown due to a request timeout. * Installing mod_wsgi on Windows would fail as mod_wsgi daemon mode specific code hadn't been excluded from the Windows build. This would result in a compile time error about ``wsgi_daemon_process`` being undefined. This problem was introduced to Windows in version 4.6.0. * When using the ``runmodwsgi`` management command integration for Django, the file containing the WSGI application entry point was specified via a full filesystem path, rather than by module import path. This meant that relative imports from that file would fail.
The file is now imported as a module path based on what ``WSGI_APPLICATION`` is set to in the Django settings module. This means the file is imported as part of the package for the project and relative imports will work. mod_wsgi-5.0.0/docs/release-notes/version-4.6.4.rst000066400000000000000000000020661452636074700217320ustar00rootroot00000000000000============= Version 4.6.4 ============= Version 4.6.4 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.4 Bugs Fixed ---------- * In more recent Python versions, the config directory in the Python installation incorporates the platform name. This directory was added as an additional directory to search for Python shared libraries when installing using the ``setup.py`` file or ``pip``. It should not even be needed for newer Python versions, but it is still checked in case of older Python versions. The only issue arising from the wrong directory (one not incorporating the platform name) being used was a linker warning about the directory not being present. * Installing mod_wsgi on Windows would fail as mod_wsgi daemon mode specific code hadn't been excluded from the Windows build. This would result in a compile time error about ``wsgi_daemon_process`` being undefined. This problem was introduced to Windows in version 4.6.0. A prior attempt to fix this in 4.6.3 missed one place in the code which needed to be changed. mod_wsgi-5.0.0/docs/release-notes/version-4.6.5.rst000066400000000000000000000021771452636074700217360ustar00rootroot00000000000000============= Version 4.6.5 ============= Version 4.6.5 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.5 Bugs Fixed ---------- * When running ``mod_wsgi-express`` and serving up static files from the document root, and the WSGI application was mounted at a sub URL using ``--mount-point``, the static files in the document root outside of the mount point for the WSGI application would no longer be accessible. * If no system mime types file can be found, fall back to ``/dev/null`` so that Apache can still at least start up. Features Changed ---------------- * On macOS, use ``/var/tmp`` as the default parent directory for the server root directory rather than the value of ``$TMPDIR``. The latter can produce a path which is too long and the UNIX socket cannot be written there. New Features ------------ * Now possible to use ``mod_wsgi-express`` in a ``zipapp`` created using ``shiv``. This entailed a special workaround to detect when ``shiv`` was used, so that the unpacked ``site-packages`` directory could be added to the Python module search path for ``mod_wsgi-express``. mod_wsgi-5.0.0/docs/release-notes/version-4.6.6.rst000066400000000000000000000014541452636074700217340ustar00rootroot00000000000000============= Version 4.6.6 ============= Version 4.6.6 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.6 Bugs Fixed ---------- * Fix compilation failures when using Python 3.8. Features Changed ---------------- * When running ``mod_wsgi-express`` it will do a search for the location of ``bash`` and ``sh`` when defining the shell to use for the generated ``apachectl``. The shell used can be overridden using the ``--shell-executable`` option. This is to get around an issue with FreeBSD not having ``/bin/bash``. New Features ------------ * The Apache request ID is accessible in request events as ``request_id``.
* The per request data dictionary accessible using ``mod_wsgi.request_data()`` is now also accessible in events as ``request_data``. mod_wsgi-5.0.0/docs/release-notes/version-4.6.7.rst000066400000000000000000000005131452636074700217300ustar00rootroot00000000000000============= Version 4.6.7 ============= Version 4.6.7 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.7 Bugs Fixed ---------- * Fix Windows build errors due to Python 3.7+ not providing empty function stubs for ``PyOS_AfterFork_Child()`` and ``PyOS_AfterFork_Parent()``. mod_wsgi-5.0.0/docs/release-notes/version-4.6.8.rst000066400000000000000000000011631452636074700217330ustar00rootroot00000000000000============= Version 4.6.8 ============= Version 4.6.8 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.6.8 Bugs Fixed ---------- * When the queue timeout was triggered for requests sent to daemon mode processes, the error response wasn't being flushed out correctly resulting in the connection still being held up to the time of the socket timeout. New Features ------------ * Add ``--enable-sendfile`` option to ``mod_wsgi-express``. Should only be used where the operating system kernel supports ``sendfile()`` for the file system type where files are hosted. mod_wsgi-5.0.0/docs/release-notes/version-4.7.0.rst000066400000000000000000000024131452636074700217230ustar00rootroot00000000000000============= Version 4.7.0 ============= Version 4.7.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.7.0 New Features ------------ * Now releasing parallel ``mod_wsgi-standalone`` package to PyPi. This is the same as the ``mod_wsgi`` package, except that by installing the ``mod_wsgi-standalone`` package, it will automatically trigger the ``mod_wsgi-httpd`` package to install the Apache HTTPD server as part of your Python installation. When you run ``mod_wsgi-express`` it will use that Apache HTTPD server installation. The ``mod_wsgi-standalone`` package is required where you need to install ``mod_wsgi-express`` using its own Apache HTTPD installation due to no system Apache HTTPD server package being available, and the installation needs to be done using a ``requirements.txt`` file for ``pip`` or other package install manager. Using ``mod_wsgi-standalone`` will ensure that the ``mod_wsgi-httpd`` package is installed first before attempting to build and install mod_wsgi. This guarantee is not provided by ``pip`` if you list ``mod_wsgi-httpd`` and ``mod_wsgi`` packages as two entries. The version numbering of the ``mod_wsgi-standalone`` package will follow the ``mod_wsgi`` versioning. mod_wsgi-5.0.0/docs/release-notes/version-4.7.1.rst000066400000000000000000000006001452636074700217200ustar00rootroot00000000000000============= Version 4.7.1 ============= Version 4.7.1 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.7.1 Bugs Fixed ---------- * Fix up installation on Windows into a virtual environment when using latest ``virtualenv`` version, or recent Python versions with the bundled ``venv`` module for creating virtual environments. 
mod_wsgi-5.0.0/docs/release-notes/version-4.8.0.rst000066400000000000000000000151461452636074700217330ustar00rootroot00000000000000============= Version 4.8.0 ============= Version 4.8.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.8.0 Bugs Fixed ---------- * Fixed potential for a process crash on Apache startup when the WSGI script file or other Python script file was being preloaded. This was triggered when ``WSGIImportScript`` was used, or if ``WSGIScriptAlias`` or ``WSGIScriptAliasMatch`` were used and both the ``process-group`` and ``application-group`` options were used with those directives. The potential for this problem arising was extremely high on Alpine Linux, but seems to be very rare on a full Linux or macOS distribution where glibc was being used. * Include a potential workaround so that virtual environments work on Windows. Use of virtual environments in embedded systems on Windows has been broken ever since ``python -m venv`` was introduced. Initially ``virtualenv`` was not affected, although when it changed to use the new style Python virtual environment layout the same as ``python -m venv`` it also broke. This was from about ``virtualenv`` version 20.0.0. The underlying cause is lack of support in CPython for using the new style virtual environments in embedded systems. The bug has existed in CPython since back in 2014 and has not been fixed. For details of the issue see https://bugs.python.org/issue22213. For non Windows systems a workaround had been used to resolve the problem, but the same workaround has never worked on Windows. The change in this version tries a different workaround for Windows environments. * Added a workaround for the fact that Python doesn't actually set the ``_main_thread`` attribute of the ``threading`` module to the main thread which initialized the main interpreter or sub interpreter, but to the first thread that imports the ``threading`` module. In an embedded system such as mod_wsgi it could be a request thread, not the main thread, that would import the ``threading`` module. This issue was causing the ``asgiref`` module used in Django to fail when using ``signal.set_wakeup_fd()`` as code was thinking it was in the main thread when it wasn't. See https://github.com/django/asgiref/issues/143. * Using ``WSGILazyInitialization Off`` would cause Python to abort the Apache parent process. The issue has been resolved, but you are warned that you should not be using this option anyway as it is dangerous and opens up security holes with the potential for user code to run as the ``root`` user when Python is initialized. * Fix a Python deprecation warning for ``PyArg_ParseTuple()`` which would cause the process to crash when deprecation warnings were turned on globally for an application. The crash was occurring whenever anything was output to the Apache error log via ``print()``. Features Changed ---------------- * The ``--isatty`` option of mod_wsgi-express has been removed and the behaviour enabled by the option is now the default. The default behaviour is now that if mod_wsgi-express is run in an interactive terminal, then Apache will be started within a sub process of the mod_wsgi-express script and the ``SIGWINCH`` signal will be blocked and not passed through to Apache. This means that a window resizing event will no longer cause mod_wsgi-express to shut down unexpectedly.
* When trying to set resource limits and they can't be set, the system error number will now be included in the error message. New Features ------------ * Added the ``mod_wsgi.subscribe_shutdown()`` function for registering a callback to be called when the process is being shut down. This is needed because ``atexit.register()`` doesn't work as required for the main Python interpreter, specifically the ``atexit`` callback isn't called before the main interpreter thread attempts to wait on threads on shutdown, thus preventing one from shutting down daemon threads and waiting on them. This feature to get a callback on process shutdown was previously available by using ``mod_wsgi.subscribe_events()``, but that would also report events to the callback on requests as they happen, thus adding extra overhead if not using the request events. The new registration function can thus be used where you are only interested in the event for the process being shut down. A short illustrative sketch of using it is included at the end of these notes. * Added an ``--embedded-mode`` option to mod_wsgi-express to make it easier to force it into embedded mode for high throughput, CPU bound applications with minimal response times. In this case the number of Apache child worker processes used for embedded mode will be dictated by the ``--processes`` and ``--threads`` options, completely overriding any automatic mechanism to set those parameters. Any auto scaling done by Apache for the child worker processes will also be disabled. This gives preference to using the Apache worker MPM instead of the event MPM, as the event MPM doesn't work correctly when told to run with fewer than three threads per process. You can switch back to using the event MPM by using the ``--server-mpm`` option, but will need to ensure that you have three or more threads per process. * Locking of the Python global interpreter lock has been reviewed, with changes resulting in a reduction in overhead, or otherwise changing the interaction between threads such that at a high request rate with a hello world application, a greater request throughput can be achieved. How much improvement you see with your own applications will depend on what your application does and whether you have short response times to begin with. If you have an I/O bound application with long response times you likely aren't going to see any difference. * Internal metrics collection has been improved, with additional information provided in process metrics and a new request metrics feature added giving access to aggregated metrics over the time of a reporting period. This includes bucketed time data on requests so you can calculate the distribution of server, queue and application time. Note that the new request metrics feature is still a work in progress and may be modified or enhanced, causing breaking changes in the format of data returned. * Hidden experimental support for running ``mod_wsgi-express start-server`` on Windows. It will not show in the list of sub commands ``mod_wsgi-express`` accepts on Windows, but it is there. There are still various issues that need to be sorted out, but assistance is needed from someone who knows more about programming Python on Windows and Windows programming in general to get it all working properly. If you are interested in helping, reach out on the mod_wsgi mailing list.
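As a purely illustrative sketch of the ``mod_wsgi.subscribe_shutdown()`` function described above (the background thread and all names here are assumptions for the example, not part of mod_wsgi itself), a WSGI script file could use the shutdown callback to stop its own daemon thread in an orderly manner::

    import threading

    import mod_wsgi

    stop_event = threading.Event()

    def background_task():
        # Example background work loop which exits when asked to stop.
        while not stop_event.wait(timeout=5.0):
            pass  # ... do periodic work here ...

    worker = threading.Thread(target=background_task, daemon=True)
    worker.start()

    def shutdown_handler(event, **kwargs):
        # Called when the process is being shut down. Signal the
        # background thread to stop and wait briefly for it to exit.
        stop_event.set()
        worker.join(timeout=5.0)

    mod_wsgi.subscribe_shutdown(shutdown_handler)
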
mod_wsgi-5.0.0/docs/release-notes/version-4.9.0.rst000066400000000000000000000066121452636074700217320ustar00rootroot00000000000000============= Version 4.9.0 ============= Version 4.9.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.9.0 Bugs Fixed ---------- * The mod_wsgi code wouldn't compile on Python 3.10 as various Python C API functions were removed. Note that the changes required switching to alternate C APIs. The changes were made for all Python versions back to Python 3.6 and were not conditional on Python 3.10+ being used. This is why the minor version got bumped. * When using the CMMI (configure/make/make install) method for compiling mod_wsgi, if embedded mode was being disabled at compile time, compilation would fail. * When the ``maximum-requests`` option was used with mod_wsgi daemon mode, and a graceful restart signal was sent to the daemon process while there was an active request, the process would only shut down when the graceful timeout period had expired, and not as soon as any active requests had completed, if that had occurred before the graceful timeout had expired. * When using the ``startup-timeout`` and ``restart-interval`` options of the ``WSGIDaemonProcess`` directive together, checking for the expiration time of the startup time was done incorrectly, resulting in process restart being delayed if startup had failed. In the worst case this was the lesser of the time periods specified by the options ``restart-interval``, ``deadlock-timeout``, ``graceful-timeout`` and ``eviction-timeout``. If ``request-timeout`` were defined it would however still be calculated correctly. As ``request-timeout`` was by default defined when using ``mod_wsgi-express``, this issue usually only affected mod_wsgi when manually configuring Apache. Features Changed ---------------- * Historically when using embedded mode, ``wsgi.multithread`` in the WSGI ``environ`` dictionary has reported ``True`` when any multithread capable Apache MPM was used (eg., worker, event), even if the current number of configured threads per child process was overridden to be 1. Why this was the case has been forgotten, but generally it wouldn't matter since no one would ever set up Apache with a multithread MPM and then configure the number of threads to be 1. If that was desired then the ``prefork`` MPM would be used. With ``mod_wsgi-express`` since 4.8.0 making it much easier to use embedded mode and have a sane configuration used, since it is generated for you, the value of ``wsgi.multithread`` has been changed such that it will now correctly report ``False`` if using embedded mode with a multithread capable MPM, but with the number of configured threads set to 1. A small illustration of consulting this flag from a WSGI application is included at the end of these notes. * The ``graceful-timeout`` option for ``WSGIDaemonProcess`` now defaults to 15 seconds. This was always the case when ``mod_wsgi-express`` was used but the default was never applied back to the case where mod_wsgi was being configured manually. A default of 15 seconds for ``graceful-timeout`` is being added to avoid the problem where sending a SIGUSR1 to a daemon mode process would never see the process shut down due to there never being a time when there were no active requests. This might occur when there was a stuck request that never completed, or numerous long running requests which always overlapped in time, meaning the process was never idle. You can still force ``graceful-timeout`` to be 0 to restore the original behaviour, but that is probably not recommended.
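As a small, purely illustrative sketch of how an application might consult the ``wsgi.multithread`` flag discussed above (the application itself is an assumed example, not part of mod_wsgi), it can be used to decide whether shared state needs to be guarded by a lock::

    import threading

    counter = 0
    counter_lock = threading.Lock()

    def application(environ, start_response):
        global counter

        # Only bother taking the lock when the server says the process
        # may be handling requests in multiple threads at the same time.
        if environ['wsgi.multithread']:
            with counter_lock:
                counter += 1
                count = counter
        else:
            counter += 1
            count = counter

        status = '200 OK'
        output = ('Requests handled: %d\n' % count).encode('UTF-8')
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]
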
mod_wsgi-5.0.0/docs/release-notes/version-4.9.1.rst000066400000000000000000000113641452636074700217330ustar00rootroot00000000000000============= Version 4.9.1 ============= Version 4.9.1 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.9.1 Bugs Fixed ---------- * When using ``--enable-debugger`` of mod_wsgi-express to enable Pdb, it was failing due to prior changes to run Apache in a sub process to avoid Apache being shut down when the window size changed. This was because standard input was being detached from Apache and so it was not possible to interact with Pdb. Now when ``--enable-debugger`` is used, or any feature which uses ``--debug-mode``, Apache will not be run in a sub process so that you can still use standard input to interact with the process if needed. This does mean that a window size change event will again cause Apache to shut down in these cases though. * Update code so it compiles on Python 3.11. Python 3.11 makes structures for Python frame objects opaque and requires functions to access struct members. Features Changed ---------------- * Historically when a process was being shut down, mod_wsgi would do its best to destroy any Python sub interpreters as well as the main Python interpreter. This was done in case applications attempted to run any actions on process shutdown via ``atexit`` registered callbacks or other means. Because of changes in Python 3.9, and possibly because mod_wsgi makes use of externally created C threads to handle requests, and not Python native threads, there is now a suspicion that attempting to delete Python sub interpreters can hang. It is believed this may relate to Python core now expecting all Python thread state objects to have been deleted before the Python sub interpreter can be destroyed. If they aren't then Python core code can block indefinitely. If the issue isn't the externally created C threads that mod_wsgi uses, it might instead be arising as a problem when a hosted WSGI application creates its own background threads but they are still running when the attempt is made to destroy the sub interpreter. In the case of using daemon mode the result is that processes can hang on shutdown, but will still at least be deleted after 5 seconds due to how Apache process management will forcibly kill managed processes after 5 seconds if they do not exit cleanly themselves. In other words the issue may not be noticed. For embedded mode however, the Apache child process can hang around indefinitely, possibly only being deleted if some higher level system application manager such as systemd is able to detect the problem and forcibly delete the hung process. Although mod_wsgi always attempts to ensure that the externally created C threads are not still handling HTTP requests and thus not active prior to destroying the Python interpreter, it is impossible to guarantee this. Similarly, there is no way to guarantee that background threads created by a WSGI application aren't still running. As such, it isn't possible to safely attempt to delete the Python thread state objects before deleting the Python sub interpreter. Because of this uncertainty mod_wsgi now provides a way to disable the attempt to destroy the Python sub interpreters or the main Python interpreter when the process is being shut down. This will though mean that ``atexit`` registered callbacks will not be called if this option is enabled.
It is therefore important that you use mod_wsgi's own mechanism of being notified when a process is being shutdown to perform any special actions. :: import mod_wsgi def shutdown_handler(event, **kwargs): print('SHUTDOWN-HANDLER', event, kwargs) mod_wsgi.subscribe_shutdown(shutdown_handler) Use of this shutdown notification was necessary anyway to reliably attempt to stop background threads created by the WSGI application since ``atexit`` registered callbacks are not called by Python core until after it thinks all threads have been stopped. In other words, ``atexit`` register callbacks couldn't be used to reliably stop background threads. Thus use of the mod_wsgi mechanism for performing actions on process shutdown is the preferred way. Overall it is expected that the majority of users will not notice this change as it is very rare to see WSGI applications want to perform special actions on process shutdown. If you are affected, you should use mod_wsgi's mechanism to perform special actions on process shutdown. If you need to enable this mode whereby no attempt is made to destroy the Python interpreter (including sub interpreters) on process shutdown, you can add at global scope in the Apache configuration:: WSGIDestroyInterpreter Off If you are using mod_wsgi-express, you can instead supply the command line option ``--orphan-interpreter``. mod_wsgi-5.0.0/docs/release-notes/version-4.9.2.rst000066400000000000000000000006121452636074700217260ustar00rootroot00000000000000============= Version 4.9.2 ============= Version 4.9.2 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.9.2 Bugs Fixed ---------- * When using ``mod_wsgi-express`` in daemon mode, and source code reloading was enabled, an invalid URL path which contained a byte sequence which could not be decoded as UTF-8 was causing a process crash. mod_wsgi-5.0.0/docs/release-notes/version-4.9.3.rst000066400000000000000000000112401452636074700217260ustar00rootroot00000000000000============= Version 4.9.3 ============= Version 4.9.3 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.9.3 Bugs Fixed ---------- * When using ``WSGITrustedProxies`` and ``WSGITrustedProxyHeaders`` in the Apache configuration, or ``--trust-proxy`` and ``--trust-proxy-header`` options with ``mod_wsgi-express``, if you trusted the ``X-Client-IP`` header and a request was received from an untrusted client, the header was not being correctly removed from the set of headers passed through to the WSGI application. This only occurred with the ``X-Client-IP`` header and the same problem was not present if trusting the ``X-Real-IP`` or ``X-Forwarded-For`` headers. The purpose of this feature for trusting a front end proxy was in this case for the headers: * ``X-Client-IP`` * ``X-Real-IP`` * ``X-Forwarded-For`` and was designed to allow the value of ``REMOTE_ADDR`` passed to the WSGI application to be rewritten to the IP address that a trusted proxy said was the real remote address of the client. In other words, if a request was received from a proxy the IP address of which was trusted, ``REMOTE_ADDR`` would be set to the value of the single designated header out of those listed above which was to be trusted. In the case where the proxy was trusted, in addition to ``REMOTE_ADDR`` being rewritten, only the trusted header would be passed through. 
That is, if ``X-Real-IP`` was the trusted header, then ``HTTP_X_REAL_IP`` would be passed to the WSGI application, but ``HTTP_X_CLIENT_IP`` and ``HTTP_X_FORWARDED_FOR`` would be dropped if corresponding headers had also been supplied. That the header used to rewrite ``REMOTE_ADDR`` was still passed through was only intended for the purpose of documenting where the value of ``REMOTE_ADDR`` came from. A WSGI application relying on this feature should only ever use the value of ``REMOTE_ADDR`` and should ignore the header passed through. The behaviour as described was therefore based on a WSGI application not at the same time enabling any WSGI or web framework middleware to try and process any proxy headers a second time, and ``REMOTE_ADDR`` should be the single source of truth. Albeit, the headers which were passed through should have yielded the same result for ``REMOTE_ADDR`` if the proxy headers were processed a second time. Now in the case where the client a request was received from was not a trusted proxy, ``REMOTE_ADDR`` would not be rewritten, and would be left as the IP of the client, and none of the headers listed above were supposed to be passed through. That ``REMOTE_ADDR`` is not rewritten is implemented correctly when the client is not a trusted proxy, but of the three headers listed above, ``HTTP_X_CLIENT_IP`` was not being dropped if the corresponding header was supplied. If the WSGI application followed best practice and only relied on the value of ``REMOTE_ADDR`` as the source of truth for the remote client address, then that ``HTTP_X_CLIENT_IP`` was not being dropped should pose no security risk. There would however be a problem if a WSGI application was still enabling a WSGI or web framework specific middleware to process the proxy headers a second time even though not required. In this case, the middleware used by the WSGI application may still trust the ``X-Client-IP`` header and rewrite ``REMOTE_ADDR``, allowing a malicious client to pretend to have a different IP address. In addition to the WSGI application having redundant checks for the proxy headers, to take advantage of this, a client would also need direct access to the Apache/mod_wsgi server instance. In the case that only clients on your private network behind your proxy could access the Apache/mod_wsgi server instance, that would imply any malicious actor already had access to your private network and had access to hosts in that private network or could attach their own device to that private network. In the case where your Apache/mod_wsgi server instance could be accessed from the same external networks as a proxy forwarding requests to it, such as may occur if making use of a CDN proxy cache, a client would still need to know the direct address used by the Apache/mod_wsgi server instance. Note that only one proxy header for designating the IP of a client should ever be trusted. If you trust more than one, then which will be used if both are present is undefined as it is dependent on the order in which Apache processes headers. This hasn't changed, and as before, to avoid ambiguity you should only trust one of the proxy headers recognised for this purpose.
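To illustrate the best practice described above (this snippet is only an assumed example, not part of mod_wsgi itself), a WSGI application relying on ``WSGITrustedProxies`` and ``WSGITrustedProxyHeaders`` should read the client address from ``REMOTE_ADDR`` alone and avoid re-processing the proxy headers itself::

    def application(environ, start_response):
        # REMOTE_ADDR has already been rewritten by mod_wsgi when the
        # request came via a trusted proxy, so treat it as the single
        # source of truth and do not consult X-Forwarded-For or
        # X-Client-IP here a second time.
        client_ip = environ['REMOTE_ADDR']

        status = '200 OK'
        output = ('Client address: %s\n' % client_ip).encode('UTF-8')
        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]
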
mod_wsgi-5.0.0/docs/release-notes/version-4.9.4.rst000066400000000000000000000014571452636074700217400ustar00rootroot00000000000000============= Version 4.9.4 ============= Version 4.9.4 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/4.9.4 Bugs Fixed ---------- * Apache 2.4.54 changed the default value for ``LimitRequestBody`` from 0, which indicates there is no limit, to 1Gi. If the Apache configuration supplied with a distribution wasn't explicitly setting ``LimitRequestBody`` to 0 at global server scope for the purposes of documenting the default, and it was actually relying on the compiled in default, then when using mod_wsgi daemon mode, if a request body size greater than 1Gi was encountered the mod_wsgi daemon mode process would crash. * Fix ability to build mod_wsgi against Apache 2.2. Do note that in general only recent versions of Apache 2.4 are supported. mod_wsgi-5.0.0/docs/release-notes/version-5.0.0.rst000066400000000000000000000014541452636074700217210ustar00rootroot00000000000000============= Version 5.0.0 ============= Version 5.0.0 of mod_wsgi can be obtained from: https://codeload.github.com/GrahamDumpleton/mod_wsgi/tar.gz/5.0.0 *Note that the major version 5.0 was introduced not because of any new major features but because from version 5.0 onwards compatibility with Python 2.7 is no longer guaranteed. A minimum Python version of 3.8 will be enforced by the Python package installation configuration.* Features Changed ---------------- * The `setuptools` package is now required to be installed in order to use the `pip install` method to install mod_wsgi. This is because `distutils` has been removed in Python 3.12. Bugs Fixed ---------- * Fix ability to build mod_wsgi against Apache 2.2. Do note that in general only recent versions of Apache 2.4 are supported. mod_wsgi-5.0.0/docs/reporting-bugs.rst000066400000000000000000000011231452636074700177700ustar00rootroot00000000000000============== Reporting Bugs ============== If you believe you have uncovered a bug in mod_wsgi code then lodge a bug report on the mod_wsgi issue tracker. The issue tracker is located on GitHub at: * https://github.com/GrahamDumpleton/mod_wsgi/issues Before creating a ticket on the issue tracker, please do try and ensure you attempt to resolve issues using the mod_wsgi mailing list first as explained in :doc:`finding-help`. The majority of issues lodged via the issue tracker are not actually bugs in mod_wsgi but due to external factors or simply a failure to read the documentation. mod_wsgi-5.0.0/docs/requirements.rst000066400000000000000000000030221452636074700175450ustar00rootroot00000000000000============ Requirements ============ The mod_wsgi package can be compiled for and used with most recent patch revisions of Apache 2.0, 2.2 or 2.4 on UNIX like systems, such as Linux and MacOS X, as well as Windows. It is highly recommended that you use Apache 2.4. Older versions of Apache have architectural design problems and sub optimal configuration defaults that can result in excessive memory usage in certain circumstances. More recent mod_wsgi versions attempt to protect against these problems in Apache 2.0 and 2.2, however it is still better to use Apache 2.4. Any of the single threaded 'prefork' or multithreaded 'worker' and 'event' Apache MPMs can be used when running on UNIX like systems. Both Python 2 and 3 are supported, the minimum recommended versions of each being Python 2.6 and 3.3 respectively.
The Python installation must have been installed in a way that shared libraries for Python are provided such that embedding of Python in another application is possible. The mod_wsgi package should be able to host any Python web application which complies with the WSGI_ specification (PEP 3333). The implementation is very strict with its interpretation of the WSGI specification. Other WSGI servers available aren't as strict and allow Python web applications to run which do not comply with the WSGI specification. If your Python web application doesn't comply properly with the WSGI specification, then it may fail to run or may run sub optimally when using mod_wsgi. .. _WSGI: http://www.python.org/dev/peps/pep-3333/ mod_wsgi-5.0.0/docs/security-issues.rst000066400000000000000000000011111452636074700201760ustar00rootroot00000000000000=============== Security Issues =============== Due to security issues in versions of mod_wsgi up to and including version 3.4, ensure that you are using version 3.5 or later. Release notes for versions containing security related fixes are: * :doc:`release-notes/version-3.5` Because many Linux distributions still ship ancient out of date versions, which are not supported, it is highly recommended you avoid using packaged binary versions provided by your Linux distribution. Instead install mod_wsgi from source code, ensuring you keep up to date with the most recent version. mod_wsgi-5.0.0/docs/source-code.rst000066400000000000000000000006231452636074700172350ustar00rootroot00000000000000=========== Source Code =========== The source code repository for mod_wsgi is located on GitHub at: * https://github.com/GrahamDumpleton/mod_wsgi Downloadable tar balls of the source code can be found at: * https://github.com/GrahamDumpleton/mod_wsgi/releases A version of the source code which can be installed using ``pip`` can also be found on PyPi at: * https://pypi.python.org/pypi/mod_wsgi mod_wsgi-5.0.0/docs/troubleshooting.rst000066400000000000000000000013171452636074700202550ustar00rootroot00000000000000=============== Troubleshooting =============== If you are having problems getting mod_wsgi to start up or do what you want it to do, first off ensure that you read the following documents: * :doc:`user-guides/installation-issues` * :doc:`user-guides/configuration-issues` * :doc:`user-guides/application-issues` You can also do some basic checking of your installation and configuration to validate that how it is setup is how you expect it to be. See the following document: * :doc:`user-guides/checking-your-installation` If none of the common issues match up with the problem you are seeing and are after other ideas, or you have the need to perform more low level debugging, check out the :doc:`user-guides`. mod_wsgi-5.0.0/docs/user-guides.rst000066400000000000000000000014461452636074700172650ustar00rootroot00000000000000============ User Guides ============ .. 
toctree:: :maxdepth: 1 user-guides/quick-installation-guide user-guides/installation-on-macosx user-guides/quick-configuration-guide user-guides/configuration-guidelines user-guides/installation-issues user-guides/configuration-issues user-guides/application-issues user-guides/frequently-asked-questions user-guides/checking-your-installation user-guides/debugging-techniques user-guides/processes-and-threading user-guides/reloading-source-code user-guides/virtual-environments user-guides/access-control-mechanisms user-guides/file-wrapper-extension user-guides/registering-cleanup-code user-guides/assorted-tips-and-tricks user-guides/issues-with-pickle-module user-guides/issues-with-expat-library mod_wsgi-5.0.0/docs/user-guides/000077500000000000000000000000001452636074700165265ustar00rootroot00000000000000mod_wsgi-5.0.0/docs/user-guides/access-control-mechanisms.rst000066400000000000000000000275711452636074700243400ustar00rootroot00000000000000Access Control Mechanisms ========================= This document contains information about mechanisms available in mod_wsgi for controlling who can access a WSGI application. This includes coverage of support for HTTP Basic and Digest authentication mechanisms, as well as server side mechanisms for authorisation and host access control. HTTP User Authentication ------------------------ The HTTP protocol supports user authentication mechanisms for clients through the 'Authorization' header. The two main examples for this are the Basic and Digest authentication mechanisms. Unlike other HTTP headers, the authorisation header is not passed through to a WSGI application by default. This is the case as doing so could leak information about passwords through to a WSGI application which should not be able to see them when Apache is performing authentication. If Apache is performing authentication, a WSGI application can still find out what type of authentication scheme was used by checking the variable ``AUTH_TYPE`` of the WSGI application environment. The login name of the authorised user can be determined by checking the variable ``REMOTE_USER``. If it is desired that the WSGI application be responsible for handling user authentication, then it is necessary to explicitly configure mod_wsgi to pass the required headers through to the application. This can be done by specifying the WSGIPassAuthorization directive in the appropriate context and setting it to 'On'. Note that prior to mod_wsgi version 2.0c5, this directive could not be used in .htaccess files. When passing of authorisation information is enabled, the authorisation headers are passed through to a WSGI application in the ``HTTP_AUTHORIZATION`` variable of the WSGI application environment when the equivalent HTTP request header is present. You will still need to provide your own code to process the header and perform the required hand shaking with the client to indicate whether the client is permitted access. Apache Authentication Provider ------------------------------ When Apache 2.2 was released, it introduced the concept of authentication providers. That is, Apache implements the hand shaking with the client for authentication mechanisms such as Basic and Digest. All that the user server side code needs to provide is a means of authenticating the actual credentials of the user trying to gain access to the site. 
This greatly simplified the implementation of client authentication as the hand shaking for a particular authentication mechanism was implemented only once in Apache and it wasn't necessary for each authentication module to duplicate it. This was particularly good for the Digest authentication mechanism which was non trivial to implement correctly. Using mod_wsgi 2.0 or later, it is possible using the WSGIAuthUserScript directive to define a Python script file containing code which performs the authenticating of user credentials as outlined. The required Apache configuration for defining the authentication provider for Basic authentication when using Apache 2.2 would be:: AuthType Basic AuthName "Top Secret" AuthBasicProvider wsgi WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi Require valid-user The 'auth.wsgi' script would then need to contain a 'check_password()' function with a sample as shown below:: def check_password(environ, user, password): if user == 'spy': if password == 'secret': return True return False return None This function should validate that the user exists in the user database and that the password is correct. If the user does not exist at all, then the result should be 'None'. If the user does exist, the result should be 'True' or 'False' depending on whether the password was valid. If wishing to use Digest authentication, the configuration for Apache 2.2 would instead be:: AuthType Digest AuthName "Top Secret" AuthDigestProvider wsgi WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi Require valid-user The name of the required authentication function for Digest authentication is 'get_realm_hash()'. The result of the function must be 'None' if the user doesn't exist, or a hash string encoding the user name, authentication realm and password:: import hashlib def get_realm_hash(environ, user, realm): if user == 'spy': value = hashlib.md5() # user:realm:password input = '%s:%s:%s' % (user, realm, 'secret') if not isinstance(input, bytes): input = input.encode('UTF-8') value.update(input) hash = value.hexdigest() return hash return None By default the auth providers are executed in context of first interpreter created by Python, ie., '%{GLOBAL}' and always in the Apache child processes, never in a daemon process. The interpreter can be overridden using the 'application-group' option to the script directive. The namespace for authentication groups is shared with that for application groups defined by WSGIApplicationGroup. Because the auth provider is always run in the Apache child processes and never in the context of a mod_wsgi daemon process, if the authentication check is making use of the internals of some Python web framework, it is recommended that the application using that web framework also be run in embedded mode and the same application group. This is the case as the Python web frameworks often bring in a huge amount of code even if using only one small part of them. This will result in a lot of memory being used in the Apache child processes just to support the auth provider. 
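Returning to the 'check_password()' function shown earlier, a slightly more defensive, purely illustrative variant (the in-memory accounts table is an assumption for the example, with a real deployment consulting a proper user store) might use a constant time comparison when checking the password::

    import hmac

    # Illustrative only: a real deployment would consult a database or
    # other user store rather than a dictionary defined in the script.
    ACCOUNTS = {
        'spy': 'secret',
        'witness': 'anonymous',
    }

    def check_password(environ, user, password):
        try:
            expected = ACCOUNTS[user]
        except KeyError:
            # Unknown user.
            return None

        # Use a constant time comparison to avoid leaking information
        # about the password through timing differences.
        return hmac.compare_digest(expected, password)
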
If mod_authn_alias is being loaded into Apache, then an aliased auth provider can also be defined:: <AuthnProviderAlias wsgi django> WSGIAuthUserScript /usr/local/django/mysite/apache/auth.wsgi \ application-group=django </AuthnProviderAlias> WSGIScriptAlias / /usr/local/django/mysite/apache/django.wsgi <Directory /usr/local/django/mysite/apache> <IfVersion < 2.4> Order allow,deny Allow from all </IfVersion> <IfVersion >= 2.4> Require all granted </IfVersion> WSGIApplicationGroup django AuthType Basic AuthName "Django Site" AuthBasicProvider django Require valid-user </Directory> An authentication script for Django might then be something like:: import os, sys sys.path.append('/usr/local/django') os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings' from django.contrib.auth.models import User from django import db def check_password(environ, user, password): db.reset_queries() kwargs = {'username': user, 'is_active': True} try: try: user = User.objects.get(**kwargs) except User.DoesNotExist: return None if user.check_password(password): return True else: return False finally: db.connection.close() For both Basic and Digest authentication providers, the 'environ' dictionary passed as the first argument is a cut down version of what would be supplied to the actual WSGI application. This includes the 'wsgi.errors' object for the purposes of logging error messages associated with the request. Any configuration defined by SetEnv directives is not passed in the 'environ' dictionary because doing so would allow users to override the configuration specified in such a way from a '.htaccess' file. Configuration should as a result be placed into the script file itself. Although authentication providers were a new feature in Apache 2.2, the mod_wsgi module emulates the functionality so that the above can also be used with Apache 2.0. In using Apache 2.0, the required Apache configuration is however slightly different and needs to be:: AuthType Basic AuthName "Top Secret" WSGIAuthUserScript /usr/local/wsgi/scripts/auth.wsgi AuthAuthoritative Off Require valid-user When using Apache 2.0 however, only support for the Basic authentication mechanism is provided. It is not possible to use Digest authentication. When using Apache 1.3, this feature is not available at all. The benefit of using the Apache authentication provider mechanism rather than the WSGI application doing it all itself, is that it can be used to control access to a number of WSGI applications at the same time, as well as static files or dynamic pages implemented by other Apache modules using other programming languages such as PHP or Perl. The mechanism could even be used to control access to CGI scripts. Apache Group Authorisation -------------------------- As a complement to the authentication provider mechanism, mod_wsgi 2.0 also provides a mechanism for implementing group authorisation using the Apache 'Require' directive. To use this in conjunction with an inbuilt Apache authentication provider such as a password file, the following Apache configuration would be used:: AuthType Basic AuthName "Top Secret" AuthBasicProvider dbm AuthDBMUserFile /usr/local/wsgi/accounts.dbm WSGIAuthGroupScript /usr/local/wsgi/scripts/auth.wsgi Require wsgi-group secret-agents Require valid-user The 'auth.wsgi' script would then need to contain a 'groups_for_user()' function with a sample as shown below:: def groups_for_user(environ, user): if user == 'spy': return ['secret-agents'] return [''] The function should supply a list of groups the user is a member of or an empty list otherwise. The feature may be used with any authentication provider, including one defined using WSGIAuthUserScript.
The 'environ' dictionary passed as first argument is a cut down version of what would be supplied to the actual WSGI application. This includes the 'wsgi.errors' object for the purposes of logging error messages associated with the request. Any configuration defined by !SetEnv directives is not passed in the 'environ' dictionary because doing so would allow users to override the configuration specified in such a way from a '.htaccess' file. Configuration should as a result be placed into the script file itself. Configuration of group authorisation is the same whether Apache 2.0 or 2.2 is used. The feature is not available when using Apache 1.3. By default the group authorisation code is always executed in the context of the first interpreter created by Python, ie., '%{GLOBAL}', and always in the Apache child processes, never in a daemon process. The interpreter can be overridden using the 'application-group' option to the script directive. Host Access Controls -------------------- The authentication provider and group authorisation features help to control access based on the identity of a user. Using mod_wsgi 2.0 it is also possible to limit access based on the machine which the client is connecting from. The path to the script is defined using the WSGIAccessScript directive:: WSGIAccessScript /usr/local/wsgi/script/access.wsgi The name of the function that must exist in the script file is 'allow_access()'. It must return True or False:: def allow_access(environ, host): return host in ['localhost', '::1'] The 'environ' dictionary passed as first argument is a cut down version of what would be supplied to the actual WSGI application. This includes the 'wsgi.errors' object for the purposes of logging error messages associated with the request. Any configuration defined by !SetEnv directives is not passed in the 'environ' dictionary because doing so would allow users to override the configuration specified in such a way from a '.htaccess' file. Configuration should as a result be placed into the script file itself. By default the access checking code is executed in context of the first interpreter created by Python, ie., '%{GLOBAL}', and always in the Apache child processes, never in a daemon process. The interpreter used can be overridden using the 'application-group' option to the script directive. mod_wsgi-5.0.0/docs/user-guides/application-issues.rst000066400000000000000000001673741452636074700231160ustar00rootroot00000000000000================== Application Issues ================== Although installation and configuration of mod_wsgi may be successful, there are a range of issues that can impact on specific WSGI applications. These problems can arise for various reasons, including conflicts between an application and other Apache modules or non WSGI applications hosted by Apache, a WSGI application not being portable, use of Python modules that are not fully compatible with the way that mod_wsgi uses Python sub interpreters, or dependence on a specific operating system execution environment. The purpose of this document is to capture all the known problems that can arise, including workarounds if available, related to the actual running of a WSGI application. Note that the majority of these issues are not unique to mod_wsgi and would also affect mod_python as well. This is because they arise due to the fact that the Python interpreter is being embedded within the Apache server itself. Unlike mod_python, in mod_wsgi there are ways of avoiding many of the problems by using daemon mode. 
If you are having a problem which doesn't seem to be covered by this document, also make sure you see :doc:`../user-guides/installation-issues` and :doc:`../user-guides/configuration-issues`. Access Rights Of Apache User ---------------------------- For most Apache installations the web server is initially started up as the root user. This is necessary as operating systems will block non root applications from making use of Internet ports below 1024. A web server responding to HTTP and HTTPS requests on the standard ports will need to be able to acquire ports 80 and 443. Once the web server has acquired these ports and forked off child processes to handle any requests, the user that the child processes run as will be switched to a non privileged user. The actual name of this user varies from one system to another with some commonly used names being 'apache', 'httpd', 'www', and 'wwwserv'. As well as the user being switched, the web server will also normally switch to an alternate group. If running a WSGI application in embedded mode with mod_wsgi, the user and group that the Apache child processes run as will be inherited by the application. To determine which user and group would be used the main Apache configuration files should be consulted. The particular configuration directives which control this are ``User`` and ``Group``. For example:: User www Group www Because this user is non privileged and will generally be different to the user that owns the files for a specific WSGI application, it is important that such files and the directories which contain them are accessible to others. If the files are not readable or the directories not searchable, the web server will not be able to see or read the files and execution of the WSGI application will fail at some point. As well as being able to read files, if a WSGI application needs to be able to create or edit files, it will be necessary to create a special directory which it can use to create files in and which is owned by the same user that Apache is running as. Any files contained in the directory which it needs to edit should also be owned by the user that Apache is run as, or group privileges used in some way to ensure the application will have the required access to update the file. One example of where access rights can be a problem in Python is with Python eggs which need to be unpacked at runtime by a WSGI application. This issue arises with Trac because of its ability for plugins to be packaged as Python eggs. Pylons with its focus on being able to support Python eggs in its deployment mechanism can also be affected. Because of the growing reliance on Python eggs however, the issue could arise for any WSGI application where you have installed Python eggs in their zipped up form rather than their unpacked form. If your WSGI application is affected by this problem in relation to Python eggs, you would generally see a Python exception similar to the following occuring and being logged in the Apache error logs:: ExtractionError: Can't extract file(s) to egg cache The following error occurred while trying to extract file(s) to the Python egg cache: [Errno 13] Permission denied: '/var/www/.python-eggs' The Python egg cache directory is currently set to: /var/www/.python-eggs Perhaps your account does not have write access to this directory? You can change the cache directory by setting the PYTHON_EGG_CACHE environment variable to point to an accessible directory. 
To avoid this particular problem you can set the 'PYTHON_EGG_CACHE' cache environment variable at the start of the WSGI application script file. The environment variable should be set to a directory which is owned and/or writable by the user that Apache runs as:: import os os.environ['PYTHON_EGG_CACHE'] = '/usr/local/pylons/python-eggs' Alternatively, if using mod_wsgi 2.0, one could also use the WSGIPythonEggs directive for applications running in embedded mode, or the 'python-eggs' option to the WSGIDaemonProcess directive when using daemon mode. Note that you should refrain from ever using directories or files which have been made writable to anyone as this could compromise security. Also be aware that if hosting multiple applications under the same web server, they will all run as the same user and so it will be possible for each to both see and modify each others files. If this is an issue, you should host the applications on different web servers running as different users or on different systems. Alternatively, any data required or updated by the application should be hosted in a database with separate accounts for each application. Issues related to access rights can in general be avoided if daemon mode of mod_wsgi is used to run a WSGI application. This is because in daemon mode the user and group that the processes run as can be overridden and set to alternate values. Do however note additional issues related to 'HOME' environment variable as described below. Secure Variants Of UNIX ----------------------- In addition to the constraints imposed by Apache running as a distinct user, some variants of UNIX have features whereby access privileges for a specific user may be even further restricted. One example of such a system is SELinux. In such a system, the user that Apache runs as is typically restricted to only being able to access quite specific parts of the file system as well as possibly other resources or operating system library features. If running such a system you will need to change the configuration for the security system to allow both mod_wsgi and you application to do what is required. As an example, the extra security checks of such a system may present problems if the version of Python you are using only provides a static library and not a shared library. If you experience an error similar to:: Cannot load /etc/httpd/modules/mod_wsgi.so into server: \ /etc/httpd/modules/mod_wsgi.so: cannot restore segment prot after reloc: \ Permission denied you will either need to configure the security system appropriately to allow that memory relocations in static code to work, or you would need to make sure that you reinstall Python such that it provides a shared library and rebuild mod_wsgi. Other issues around only having a static variant of the Python library available are described in section 'Lack Of Python Shared Library' of :doc:`../user-guides/installation-issues`. Even where a shared library is used, SELinux has also resulted in similar memory related errors when loading C extension modules at run time for Python:: ImportError: /opt/python2.6/lib/python2.6/lib-dynload/itertools.so: \ failed to map segment from shared object: Permission denied All up, configuring SELinux is a bit of a black art and so you are wise to do your research. 
For some information about using mod_wsgi in a SELinux enabled environment check out: * http://www.packtpub.com/article/selinux-secured-web-hosting-python-based-web-applications * http://www.globalherald.net/jb01/weblog/21.html * http://blog.endpoint.com/2010/02/selinux-httpd-modwsgi-26-rhel-centos-5.html If you suspect that an issue may be caused by SELinux, you could temporarily try disabling it and doing a restart to verify whether it is the cause, but always re-enable it and do not disable it completely. Application Working Directory ----------------------------- When Apache is started it is typically run such that the current working directory for the application is the root directory, although the actual directory may vary dependent on the system or any extra security system in place. Importantly, the current working directory will generally never have any direct relationship to any specific WSGI application. As a result, an application should never assume that it can use relative path names for accessing the filesystem. All paths used should always be absolute path names. An application should also never change the current working directory and then assume that it can then use relative paths. This is because other applications being hosted on the same web server may assume they can do the same thing with the result that you can never be sure what the current working directory may actually be. You should not even assume that it is safe to change the working directory immediately prior to a specific operation, as use of multithreading can mean that another application could change it even before you get to perform the operation which depended on the current working directory being the value you set it to. In the case of Python, if needing to use relative paths in order to make it easier to relocate an application, one can determine the directory that a specific code module is located in using ``os.path.dirname(__file__)``. A full path name can then be constructed by using ``os.path.join()`` to merge the relative path with the directory name where the module was located. Another option is to take the directory part of the ``SCRIPT_FILENAME`` variable from the WSGI environment as the base directory. The only other alternative is to rely on a centralised configuration file so that all absolute path names are at least defined in the one place. Although it is preferable that an application never make assumptions about what the current working directory is, if for some reason the application cannot be changed the daemon mode of mod_wsgi could be used. This will work as an initial current working directory for the process can be specified as an option to the WSGIDaemonProcess directive used to configure the daemon process. Because the working directory applies to the whole process however, only the application requiring this working directory should be delegated to run within the context of that daemon process. Application Environment Variables --------------------------------- When Python sub interpreters are created, each has its own copy of any modules which are loaded. They also each have their own copy of the set of environment variables inherited by the process and found in ``os.environ``. Problems can arise with the use of ``os.environ`` though, due to the fact that updates to ``os.environ`` are pushed back into the set of process environment variables. 
This means that if the Python sub interpreter which corresponds to another application group is created after ``os.environ`` has been updated, the new value for that environment variable will be inherited by the new Python sub interpreter. This would not generally be a problem where a WSGI application is configured using a single mandatory environment variable, as the WSGI application script file for each application instance would be required to set it, thereby overriding any value inherited from another application instance via the process environment variables. As example, Django relies on the ``DJANGO_SETTINGS_MODULE`` environment variable being set to be the name of the Python module containing Django's configuration settings. So long as each WSGI script file sets this variable all will be okay. Where use of environment variables can be problematic though is where there are multiple environment variables that can be set, with some being optional and non overlapping sets of variables are used to configure different modes. As example, Trac can be configured to host a single project by setting the ``TRAC_ENV`` environment variable. Alternatively, Trac can be configured to host a group of projects by setting the ``TRAC_ENV_PARENT_DIR`` environment variable. If both variables are set at the same time, then ``TRAC_ENV`` takes precedence. If now within the one process you have a Trac instance of each type in different Python sub interpreters, if that using ``TRAC_ENV`` loads first, when the other is loaded it will inherit ``TRAC_ENV`` from the first and that will override ``TRAC_ENV_PARENT_DIR``. The end result is that both sites point at the same single project, rather than the first being for the single project and the other being the group of projects. Because of this potential leakage of environment variables between Python sub interpreters, it is preferable that WSGI applications not rely on environment variables for configuration. A further reason that environment variables should not be used for configuration is that it then becomes impossible to host two instances of the same WSGI application component within the same Python sub interpreter if each would require a different value be set for the same environment variable. Note that this also applies to other means of hosting WSGI applications besides mod_wsgi and is not mod_wsgi specific. As a consequence, because Django relies on the ``DJANGO_SETTINGS_MODULE`` environment variable being set to be the name of the Python module containing Django's configuration settings, it would be impossible to host two Django instances in the same Python sub interpreter. It is thus important that where there are multiple instances of Django that need to be run on the same web server, that they run in separate Python sub interpreters. As it stands the default behaviour of mod_wsgi is to run different WSGI application scripts within the context of different Python sub interpreters. As such, this limitation in Django does not present as an immediate problem, however it should be kept in mind when attempting to merge multiple WSGI applications into one application group under one Python sub interpreter to try and limit memory use by avoiding duplicate instances of modules in memory. The prefered way of configuring a WSGI application is for the application to be a class instance which at the point of initialisation is provided with its configuration data as an argument. 
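As a rough sketch of what this can look like, the WSGI script file below constructs the application object from an explicit configuration dictionary rather than reading process environment variables. The class and configuration key names are purely illustrative and not tied to any particular framework::

    class Application:

        def __init__(self, config):
            # Configuration is captured once at construction time rather
            # than being read from os.environ on each request.
            self.config = config

        def __call__(self, environ, start_response):
            output = ('site name is %s' % self.config['site_name']).encode('UTF-8')
            response_headers = [('Content-type', 'text/plain'),
                                ('Content-Length', str(len(output)))]
            start_response('200 OK', response_headers)
            return [output]

    application = Application({'site_name': 'example.com'})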
Alternatively, or in conjunction with this, configuration information can be passed through to the WSGI application in the WSGI environment. Variables in the WSGI environment could be set by a WSGI middleware component, or from the Apache configuration files using the ``SetEnv`` directive.

Configuring an application when it is first constructed, or by supplying the configuration information through the WSGI environment variables, is thus the only way to ensure that a WSGI application is portable between different means of hosting WSGI applications.

These problems can also be avoided by using daemon mode of mod_wsgi and delegating each WSGI application instance to a distinct daemon process group.

Timezone and Locale Settings
----------------------------

More insidious than the problem of leakage of application environment variable settings between sub interpreters is where an environment variable is required by operating system libraries to set behaviour. This is a problem because applications running in different sub interpreters could set the process environment variable to different values. Rather than each seeing behaviour consistent with the setting they used, all applications will see behaviour reflecting the setting as determined by the last application to initialise itself.

Process environment variables where this can be a problem are the 'TZ' environment variable for setting the timezone, and the 'LANG', 'LC_CTYPE', 'LC_COLLATE', 'LC_TIME' and 'LC_MESSAGES' environment variables for setting the locale and language settings.

The result of this is that you cannot host multiple WSGI applications in the same process, even if running in different sub interpreters, if they require different settings for timezone, locale and/or language. In this situation you would have no choice but to use mod_wsgi daemon mode and delegate applications requiring different settings to different daemon process groups. Alternatively, completely different instances of Apache should be used.

User HOME Environment Variable
------------------------------

If Apache is started automatically as 'root' when a machine is first booted, it would inherit the user 'HOME' environment variable setting of the 'root' user. If, however, Apache is started by a non privileged user via the 'sudo' command, it would inherit the 'HOME' environment variable of the user who started it, unless the ``-H`` option had been supplied to 'sudo'. In the case of the ``-H`` option being supplied, the 'HOME' environment variable of the 'root' user would again be used.

Because the value of the 'HOME' environment variable can vary based on how Apache has been started, an application should therefore not depend on the 'HOME' environment variable.

Unfortunately, parts of the Python standard library do use the 'HOME' environment variable as an authoritative source of information. In particular, the 'os.expanduser()' function gives precedence to the value of the 'HOME' environment variable over the home directory as obtained from the user password database entry::

    if 'HOME' not in os.environ:
        import pwd
        userhome = pwd.getpwuid(os.getuid()).pw_dir
    else:
        userhome = os.environ['HOME']

That the 'os.expanduser()' function does this means it can yield incorrect results. Since the 'setuptools' package uses 'os.expanduser()' on UNIX systems to calculate where to store Python eggs, the location it tries to use can change based on who started Apache and how.
The only way to guarantee that the 'HOME' environment variable is set to a sensible value is for it to be set explicitly at the start of the WSGI script file before anything else is done:: import os, pwd os.environ["HOME"] = pwd.getpwuid(os.getuid()).pw_dir In mod_wsgi 2.0, if using daemon mode the value of the 'HOME' environment variable will be automatically reset to correspond to the home directory of the user that the daemon process is running as. This is not done for embedded mode however, due to the fact that the Apache child processes are shared with other Apache modules and it is not seen as appropriate that mod_wsgi should be changing the same environment that is used by these other unrelated modules. For some consistency in the environment inherited by applications running in embedded mode, it is therefore recommended that 'sudo -H' at least always be used when restarting Apache from a non root account. Application Global Variables ---------------------------- Because the Python sub interpreter which hosts a WSGI application is retained in memory between requests, any global data is effectively persistent and can be used to carry state forward from one request to the next. On UNIX systems however, Apache will normally use multiple processes to handle requests and each such process will have its own global data. This means that although global data can be used, it can only be used to cache data which can be safely reused within the context of that single process. You cannot use global data as a means of holding information that must be visible to any request handler no matter which process it runs in. If data must be visible to all request handlers across all Apache processes, then it will be necessary to store the data in the filesystem directly, or using a database. Alternatively, shared memory can be employed by using a package such as memcached. Because your WSGI application can be spread across multiple process, one must also be very careful in respect of local caching mechanisms employed by database connector objects. If such an adapter is quite agressive in its caching, it is possible that a specific process may end up with an out of date view of data from a database where one of the other processes has since changed the data. The result may be that requests handled in different processes may give different results. The problems described above can be alleviated to a degree by using daemon mode of mod_wsgi and restricting to one the number of daemon processes in the process group. This will ensure that all requests are serviced by the same process. If the data is only held in memory, it would however obviously be lost when Apache is restarted or the daemon process is restarted due to a maximum number of requests being reached. Writing To Standard Output -------------------------- No WSGI application component which claims to be portable should write to standard output. That is, an application should not use the Python ``print`` statement without directing output to some alternate stream. An application should also not write directly to ``sys.stdout``. This is necessary as an underlying WSGI adapter hosting the application may use standard output as the means of communicating a response back to a web server. This technique is for example used when WSGI is hosted within a CGI script. Ideally any WSGI adapter which uses ``sys.stdout`` in this way should cache a reference to ``sys.stdout`` for its own use and then replace it with a reference to ``sys.stderr``. 
There is however nothing in the WSGI specification that requires this or recommends it, so one can't therefore rely on it being done.

In order to highlight non portable WSGI application components which write to or use standard output in some way, mod_wsgi prior to version 3.0 replaced ``sys.stdout`` with an object which will raise an exception when any attempt is made to write to or make use of standard output::

    IOError: sys.stdout access restricted by mod_wsgi

If the WSGI application you are using fails due to use of standard output being restricted and you cannot change the application or configure it to behave differently, you have two options.

The first option is to replace ``sys.stdout`` with ``sys.stderr`` at the start of your WSGI application script file::

    import sys
    sys.stdout = sys.stderr

This will have the effect of directing any data written to standard output to standard error. Such data sent to standard error is then directed through the Apache logging system and will appear in the main Apache error log file.

The second option is to remove the restriction on using standard output imposed by mod_wsgi using a configuration directive::

    WSGIRestrictStdout Off

This configuration directive must appear at global scope within the Apache configuration file outside of any VirtualHost container directives. It will remove the restriction on using standard output from all Python sub interpreters that mod_wsgi creates. There is no way using the configuration directive to remove the restriction from only one Python sub interpreter. When the restriction is not imposed, any data written to standard output will also be directed through the Apache logging system and will appear in the main Apache error log file.

Ideally though, code should never use the 'print' statement without redirecting the output to 'sys.stderr'. Thus if the code can be changed, then it should be made to use something like::

    import sys

    def function():
        print >> sys.stderr, "application debug"
        ...

Also note that code should ideally not make assumptions about the environment it is executing in, eg., whether it is running in an interactive mode, by asking whether standard output is a tty. In other words, calling 'isatty()' will cause a similar error with mod_wsgi. If such code is a library module, the code should provide a way to explicitly flag that it is being used in a non interactive application and not use magic to determine whether that is the case or not.

For further information about options for logging error messages and other debugging information from a WSGI application running under mod_wsgi see section 'Apache Error Log Files' of :doc:`../user-guides/debugging-techniques`.

WSGI applications which are known to write data to standard output in their default configuration are CherryPy and TurboGears. Some plugins for Trac also have this problem. Thus one of the two techniques described above should be used in conjunction with these WSGI applications. Alternatively, those applications will need to be configured not to output log messages via standard output.

Note that the restrictions on writing to stdout were removed in mod_wsgi 3.0 because it was found that people couldn't be bothered to fix their code. Instead they just used the documented workarounds, thereby propagating their non portable WSGI application code. Since people simply didn't care, the idea of writing portable WSGI applications is no longer actively promoted here.
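For code which you do control, a tidier alternative to bare 'print' statements is to use the standard 'logging' module configured to send messages to ``sys.stderr``, from where they will be captured by the Apache error log in the same way. A minimal sketch, with the logger name chosen arbitrarily, is::

    import logging
    import sys

    # Send all log messages at INFO level and above to standard error,
    # which mod_wsgi routes through to the Apache error log.
    logging.basicConfig(stream=sys.stderr, level=logging.INFO,
        format='%(asctime)s %(levelname)s %(name)s: %(message)s')

    logger = logging.getLogger('myapp')
    logger.info('application debug')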
Reading From Standard Input
---------------------------

No general purpose WSGI application component which claims to be portable should read from standard input. That is, an application should not read from ``sys.stdin``, either directly or indirectly.

This is necessary as an underlying WSGI adapter hosting the application may use standard input as the means of receiving a request from a web server. This technique is for example used when WSGI is hosted within a CGI script.

Ideally any WSGI adapter which uses ``sys.stdin`` in this way should cache a reference to ``sys.stdin`` for its own use and then replace it with an instance of ``StringIO.StringIO`` wrapped around an empty string such that reading from standard input would always give the impression that there is no input data available. There is however nothing in the WSGI specification that requires this or recommends it, so one can't therefore rely on it being done.

In order to highlight non portable WSGI application components which try to read from or otherwise use standard input, mod_wsgi prior to version 3.0 replaced ``sys.stdin`` with an object which will raise an exception when any attempt is made to read from standard input or otherwise manipulate or reference the object::

    IOError: sys.stdin access restricted by mod_wsgi

This restriction on standard input will however prevent the use of interactive debuggers for Python such as ``pdb``. It can also interfere with Python modules which use the ``isatty()`` method of ``sys.stdin`` to determine whether an application is being run within an interactive session. If it is required to be able to run such debuggers or other code which requires interactive input, the restriction on using standard input can be removed using a configuration directive::

    WSGIRestrictStdin Off

This configuration directive must appear at global scope within the Apache configuration file outside of any VirtualHost container directives. It will remove the restriction on using standard input from all Python sub interpreters that mod_wsgi creates. There is no way using the configuration directive to remove the restriction from only one Python sub interpreter.

Note however that removing the restriction serves no purpose unless you also run the Apache web server in single process debug mode. This is because Apache normally makes use of multiple processes and would close standard input to prevent any process trying to read from standard input.

To run Apache in single process debug mode and thus allow an interactive Python debugger such as ``pdb`` to be used, your Apache instance should be shut down and then the ``httpd`` program run explicitly::

    httpd -X

For more details on using interactive debuggers in the context of mod_wsgi see the documentation on :doc:`../user-guides/debugging-techniques`.

Note that the restrictions on reading from stdin were removed in mod_wsgi 3.0 because it was found that people couldn't be bothered to fix their code. Instead they just used the documented workarounds, thereby propagating their non portable WSGI application code. Since people simply didn't care, the idea of writing portable WSGI applications is no longer actively promoted here.

Registration Of Signal Handlers
-------------------------------

Web servers upon which WSGI applications are hosted more often than not use signals to control their operation. The Apache web server in particular uses various signals to control its operation, including the signals ``SIGTERM``, ``SIGINT``, ``SIGHUP``, ``SIGWINCH`` and ``SIGUSR1``.
If a WSGI application were to register their own signal handlers it is quite possible that they will interfere with the operation of the underlying web server, preventing it from being shutdown or restarted properly. As a general rule therefore, no WSGI application component should attempt to register its own signal handlers. In order to actually enforce this, mod_wsgi will intercept all attempts to register signal handlers and cause the registration to be ignored. As warning that this is being done, a message will be logged to the Apache error log file of the form:: mod_wsgi (pid=123): Callback registration for signal 1 ignored. If there is some very good reason that this feature should be disabled and signal handler registrations honoured, then the behaviour can be reversed using a configuration directive:: WSGIRestrictSignal Off This configuration directive must appear at global scope within the Apache configuration file outside of any VirtualHost container directives. It will remove the restriction on signal handlers from all Python sub interpreters that mod_wsgi creates. There is no way using the configuration directive to remove the restriction from only one Python sub interpreter. WSGI applications which are known to register conflicting signal handlers are CherryPy and TurboGears. If the ability to use signal handlers is reenabled when using these packages it prevents the shutdown and restart sequence of Apache from working properly and the main Apache process is forced to explicitly terminate the Apache child processes rather than waiting for them to perform an orderly shutdown. Similar issues will occur when using features of mod_wsgi daemon mode to recycle processes when a set number of requests has been reached or an inactivity timer has expired. Pickling of Python Objects -------------------------- The script files that mod_wsgi uses as the entry point for a WSGI application, although containing Python code, are not treated exactly the same as a Python code module. This has implications when it comes to using the 'pickle' module in conjunction which objects contained within the WSGI application script file. In practice what this means is that neither function objects, class objects or instances of classes which are defined in a WSGI application script file should be stored using the "pickle" module. In order to ensure that no strange problems at all are likely to occur, it is suggested that only basic builtin Python types, ie., scalars, tuples, lists and dictionaries, be stored using the "pickle" module from a WSGI application script file. That is, avoid any type of object which has user defined code associated with it. The technical reasons for the limitations in the use of the "pickle" module in conjunction with WSGI application script files are further discussed in the document :doc:`../user-guides/issues-with-pickle-module`. Note that the limitations do not apply to standard Python modules and packages imported from within a WSGI application script file from directories on the standard Python module search path. Expat Shared Library Conflicts ------------------------------ One of the Python modules which comes standard with Python is the 'pyexpat' module. This contains a Python wrapper for the popular 'expat' library. So as to avoid dependencies on third party packages the Python package actually contains a copy of the 'expat' library source code and embeds it within the 'pyexpat' module. 
Prior to Python 2.5, there was however no attempt to properly namespace the public functions within the 'expat' library source code. The problem this causes with mod_wsgi is that Apache itself also provides its own copy of and makes use of the 'expat' library. Because the Apache version of the 'expat' library is loaded first, it will always be used in preference to the version contained with the Python 'pyexpat' module. As a result, if the 'pyexpat' module is loaded into a WSGI application and the version of the 'expat' library included with Python is markedly different in some way to the Apache version, it can cause Apache to crash with a segmentation fault. It is thus important to ensure that Apache and Python use a compatible version of the 'expat' library to avoid this problem. For further technical discussion of this issue and how to determine which version of the 'expat' library both Apache and Python use, see the document :doc:`../user-guides/issues-with-expat-library`. MySQL Shared Library Conflicts ------------------------------ Shared library version conflicts can also occur with the MySQL client libraries. In this case the conflict is usually between an Apache module that uses MySQL directly such as mod_auth_mysql or mod_dbd_mysql, or an Apache module that indirectly uses MySQL such as PHP, and the Python 'MySQLdb' module. The result of conflicting library versions can be Apache crashing, or incorrect results beings returned from the MySQL client library for certain types of operations. To ascertain if there is a conflict, you need to determine which versions of the shared library each package is attempting to use. This can be done by running, on Linux, the 'ldd' command to list the library dependencies. This should be done on any Apache modules that are being loaded, any PHP modules and the Python ``_mysql`` C extension module:: $ ldd /usr/lib/python2.3/site-packages/_mysql.so | grep mysql libmysqlclient_r.so.15 => /usr/lib/libmysqlclient_r.so.15 (0xb7d52000) $ ldd /usr/lib/httpd/modules/mod_*.so | grep mysql libmysqlclient.so.12 => /usr/lib/libmysqlclient.so.12 (0xb7f00000) $ ldd /usr/lib/php4/*.so | grep mysql /usr/lib/php4/mysql.so: libmysqlclient.so.10 => /usr/lib/mysql/libmysqlclient.so.10 (0xb7f6e000) If there is a difference in the version of the MySQL client library, or one version is reentrant and the other isn't, you will need to recompile one or both of the packages such that they use the same library. SSL Shared Library Conflicts ---------------------------- When Apache is built, if it cannot find an existing SSL library that it can use or isn't told where one is that it should use, it will use a SSL library which comes with the Apache source code. When this SSL code is compiled it will be statically linked into the actual Apache executable. To determine if the SSL code is static rather than dynamically loaded from a shared library, on Linux, the 'ldd' command can be used to list the library dependencies. If an SSL library is listed, then code will not be statically compiled into Apache:: $ ldd /usr/local/apache/bin/httpd | grep ssl libssl.so.0.9.8 => /usr/lib/i686/cmov/libssl.so.0.9.8 (0xb79ab000) Where a Python module now uses a SSL library, such as a database client library with SSL support, they would typically always obtain SSL code from a shared library. When however the SSL library functions have also been compiled statically into Apache, they can conflict and interfere with those from the SSL shared library being used by the Python module. 
Such conflicts can cause core dumps, or simply make it appear that SSL support in either Apache or the Python module is not working.

Python modules where this is known to cause a problem are any database client modules which include support for connecting to the database using an SSL connection, and the Python 'hashlib' module introduced in Python 2.5.

In the case of the 'hashlib' module it will fail to load the internal C extension module called ``_hashlib`` because of the conflict. That the ``_hashlib`` module couldn't be loaded is however not raised as an exception, and instead the code will fall back to attempting to load the older ``_md5`` module. In Python 2.5 however, this older ``_md5`` module is not generally compiled and so the following error will occur::

    ImportError: No module named _md5

To resolve this problem it would be necessary to rebuild Apache and use the ``--with-ssl`` option to 'configure' to specify the location of the distinct SSL library that is being used by the Python modules.

Note that it has also been suggested that the ImportError above can be caused by the 'python-hashlib' package not being installed. This might be the case on Linux systems where this module was separated from the main Python package.

Python MD5 Hash Module Conflict
-------------------------------

Python provides, in the form of the 'md5' module, routines for calculating MD5 message-digest fingerprint (checksum) values for arbitrary data. This module is often used in Python web frameworks for generating cookie values to be associated with client session information.

If a WSGI application uses this module, it is however possible that a conflict can arise if PHP is also being loaded into Apache. The end result of the conflict will be that the 'md5' module in Python can give incorrect results for hash values. For example, the same value may be returned no matter what the input data, or an incorrect or random value can be returned even for the same data. In the worst case scenario the process may crash.

As might be expected, this can cause session based login schemes such as commonly employed by Python web frameworks such as Django, TurboGears or Trac to fail in strange ways.

The underlying trigger for all these problems appears to be a clash between the Python 'md5' module and the 'libmhash2' library used by the PHP 'mhash' module, or possibly also other PHP modules relying on md5 routines for cryptography such as the LDAP module for PHP. This clash has come about because the md5 source code in Python was replaced with an alternate version when it was packaged for Debian.
This version did not include in the "md5.h" header file some preprocessor defines to rename the md5 functions with a namespace prefix specific to Python:: #define MD5Init _PyDFSG_MD5Init #define MD5Update _PyDFSG_MD5Update #define MD5Final _PyDFSG_MD5Final #define MD5Transform _PyDFSG_MD5Transform void MD5Init(struct MD5Context *context); void MD5Update(struct MD5Context *context, md5byte const *buf, unsigned len); void MD5Final(unsigned char digest[16], struct MD5Context *context); As a result, the symbols in the md5 module ended up being:: $ nm -D /usr/lib/python2.4/lib-dynload/md5.so | grep MD5 0000000000001b30 T MD5Final 0000000000001380 T MD5Init 00000000000013b0 T MD5Transform 0000000000001c10 T MD5Update The symbols then clashed directly with the non namespaced symbols present in the 'libmhash2' library:: $ nm -D /usr/lib/libmhash.so.2 | grep MD5 00000000000069b0 T MD5Final 0000000000006200 T MD5Init 0000000000006230 T MD5Transform 0000000000006a80 T MD5Update In Python 2.5 the md5 module is implemented in a different way and thus this problem should only occur with older versions of Python. For those older versions of Python, the only workaround for this problem at the present time is to disable the loading of the 'mhash' module or other PHP modules which use the 'libmhash2' library. This will avoid the problem with the Python 'md5' module, obviously however, not loading these modules into PHP may cause some PHP programs which rely on them to fail. The actual cause of this problem having now been identified a patch has been produced and is recorded in Debian ticket: * http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=440272 It isn't know when an updated Debian package for Python may be produced. Python 'pysqlite' Symbol Conflict --------------------------------- Certain versions of 'pysqlite' module defined a global symbol 'cache_init'. This symbol clashes with a similarly named symbol present in the Apache mod_cache module. As a result of the clash, the two modules being loaded at the same time can cause the Apache process to crash or the following Python exception to be raised:: SystemError: NULL result without error in PyObject_Call This problem is mentioned in pysqlite ticket: * http://www.initd.org/tracker/pysqlite/ticket/174 and the release notes for version 2.3.3: * http://www.initd.org/tracker/pysqlite/wiki/2.3.3_Changelog of pysqlite To avoid the problem upgrade to pysqlite 2.3.3 or later. Python Simplified GIL State API ------------------------------- In an attempt to simplify management of thread state objects when coding C extension modules for Python, Python 2.3 introduced the simplified API for GIL state management. Unfortunately, this API will only work if the code is running against the very first Python sub interpreter created when Python is initialised. Because mod_wsgi by default assigns a Python sub interpreter to each WSGI application based on the virtual host and application mount point, code would normally never be executed within the context of the first Python sub interpreter created, instead a distinct Python sub interpreter would be used. The consequences of attempting to use a C extension module for Python which is implemented against the simplified API for GIL state management in any sub interpreter besides the first, is that the code is likely to deadlock or crash the process. 
The only way around this issue is to ensure that any WSGI application which makes use of C extension modules which use this API, only runs in the very first Python sub interpreter created when Python is initialised. To force a specific WSGI application to be run within the very first Python sub interpreter created when Python is initialised, the WSGIApplicationGroup directive should be used and the group set to '%{GLOBAL}':: WSGIApplicationGroup %{GLOBAL} Extension modules for which this is known to be necessary are any which have been developed using SWIG and for which the ``-threads`` option was supplied to 'swig' when the bindings were generated. One example of this is the 'dbxml' module, a Python wrapper for the Berkeley Database, previously developed by !SleepyCat Software, but now managed by Oracle. Another package believed to have this problem in certain use cases is Xapian. There is also a bit of a question mark over the Python Subversion bindings. This package also uses SWIG, however it is only some versions that appear to require that the very first sub interpreter created when Python is initialised be used. It is currently believed that this may be more to do with coding problems than with the ``-threads`` option being passed to the 'swig' command when the bindings were generated. For all the affected packages, as described above it is believed though that they will work when application group is set to force the application to run in the first interpreter created by Python as described above. Another option for packages which use SWIG generated bindings is not to use the ``-threads`` option when 'swig' is used to generate the bindings. This will avoid any problems and allow the package to be used in any sub interpreter. Do be aware though that by disabling thread support in SWIG bindings, that the GIL isn't released when C code is entered. The consequences of this are that if the C code blocks, the whole Python interpreter environment running in that process will be blocked, even requests being handled within other threads in different sub interpreters. Reloading Python Interpreters ----------------------------- *Note: The "Interpreter" reload mechanism has been removed in mod_wsgi version 2.0. This is because the problems with third party modules didn't make it a viable option. Its continued presence was simply complicating the addition of new features. As an alternative, daemon mode of mod_wsgi should be used and the "Process" reload mechanism added with mod_wsgi 2.0.* To make it possible to modify a WSGI application and have the whole application reloaded without restarting the Apache web server, mod_wsgi provides an interpreter reloading feature. This specific feature is enabled using the WSGIReloadMechanism directive, setting it to the value 'Interpreter' instead of its default value of 'Module':: WSGIReloadMechanism Interpreter When this option is selected and script reloading is also enabled, when the WSGI application script file is modified, the next request which arrives will result in the Python sub interpreter which is hosting that WSGI application being destroyed. A new Python sub interpreter will then be created and the WSGI application reloaded including any changes made to normal Python modules. For many WSGI applications this mechanism will generally work fine, however there are a few limitations on what is reloaded, plus some Python C extension modules can be incompatible with this feature. 
The first issue is that although Python code modules will be destroyed and reloaded, because a C extension module is only loaded once and used across all Python sub interpreters for the life of the process, any changes to a C extension module will not be picked up.

The second issue is that some C extension modules may cache references to the Python interpreter object itself. Because there is no notification mechanism for letting a C extension module know when a sub interpreter is destroyed, it is possible that later on the C extension module may attempt to access the now destroyed Python interpreter. By this time the pointer reference is likely a dangling reference to unused memory or some completely different data, and attempting to access or use it will likely cause the process to crash at some point.

A third issue is that the C extension module may cache references to Python objects in static variables but not actually increment the reference count on the objects in respect of its own reference to the objects. When the last Python sub interpreter to hold a reference to that Python object is destroyed, the object itself would be destroyed but the static variable left with a dangling pointer. If a new Python sub interpreter is then created and the C extension module attempts to use that cached Python object, accessing it or using it will likely cause the process to crash at some point.

A few examples of Python modules which exhibit one or more of these problems are psycopg2, PyProtocols and lxml. In the case of PyProtocols, because this module is used by TurboGears and sometimes used indirectly by Pylons applications, it means that the interpreter reloading mechanism cannot be used with either of these packages. The reason for the problems with PyProtocols appears to stem from its use of Pyrex generated code. The lxml package similarly uses Pyrex and is thus afflicted.

In general it is probably inadvisable to use the interpreter reload mechanism with any WSGI application which uses large or complicated C extension modules. It would be recommended for example that the interpreter reload mechanism not be used with Trac because of its use of the Python Subversion bindings. One would also need to be cautious if using any Python database client, although some success has been seen when using simple database adapters such as pysqlite.

Multiple Python Sub Interpreters
--------------------------------

In addition to the requirements imposed by the Python GIL, other issues can also arise with C extension modules when multiple Python sub interpreters are being used.

Typically these problems arise where an extension module caches a Python object from the sub interpreter which is initially used to load the module and then passes that object to code executing within secondary sub interpreters.

The prime example of where this would be a problem is where the code within the second sub interpreter attempts to execute a method of the Python object. When this occurs the result will be an attempt to execute Python code which doesn't belong to the current sub interpreter. One result of this will be that if the code being executed attempts to import additional modules, it will obtain those modules from the current sub interpreter rather than the interpreter the code belonged to. The result of this can be an unholy mixing of code and data owned by multiple sub interpreters leading to potential chaos at some point.
A more concrete outcome of such a mixing of code and data from multiple sub interpreters is where a file object from one sub interpreter is used within a different sub interpreter. In this sort of situation a Python exception will occur as Python will detect in certain cases that the object didn't belong to that interpreter:: exceptions.IOError: file() constructor not accessible in restricted mode Problems with code being executed in restricted mode can also occur when the Python code and data marshalling features are used:: exceptions.RuntimeError: cannot unmarshal code objects in restricted execution mode A further case is where the cached object is a class object and that object is used to create instances of that type of object for different sub interpreters. As above this can result in an unholy mixing of code and data from multiple sub interpreters, but at a more mundane level may become evident through the 'isinstance()' function failing when used to check the object instances against the local type object for that sub interpreter. An example of a Python module which fails in this way is psycopg2, which caches an instance of the 'decimal.Decimal' type and uses it to create object instances for all sub interpreters. This particular problem in psycopg2 has been reported in psycopg2 ticket: * http://www.initd.org/tracker/psycopg/ticket/192 and has been fixed in pyscopg2 source code. It isn't known however which version of psycopg2 this fix may have been released with. Another package believed to have this problem in certain use cases is lxml. Because of the possibilty that extension module writers have not written their code to take into consideration it being used from multiple sub interpreters, the safest approach is to force all WSGI applications to run within the same application group, with that preferably being the first interpreter instance created by Python. To force a specific WSGI application to be run within the very first Python sub interpreter created when Python is initialised, the WSGIApplicationGroup directive should be used and the group set to '%{GLOBAL}':: WSGIApplicationGroup %{GLOBAL} If it is not feasible to force all WSGI applications to run in the same interpreter, then daemon mode of mod_wsgi should be used to assign different WSGI applications to their own daemon processes. Each would then be made to run in the first Python sub interpreter instance within their respective processes. Memory Constrained VPS Systems ------------------------------ Virtual Private Server (VPS) systems typically always have constraints imposed on them in regard to the amount of memory or resources they are able to use. Various limits and related counts are described below: *Memory Limit* Maximum virtual memory size a VPS/context can allocate. *Used Memory* Virtual memory size used by a running VPS/context. *Max Total Memory* Maximum virtual memory usage by VPS/context. *Context RSS Limit* Maximum resident memory size a VPS/context can allocate. If limit is exceeded, VPS starts to use the host's SWAP. *Context RSS* Resident memory size used by a running VPS/context. *Max RSS Memory* Maximum resident memory usage by VPS/context. *Disk Limit* Maximum disk space that can be used by VPS (calculated for the entire VPS file tree). *Used Disk Memory* Disk space used by a VPS file tree. *Files Limit* Maximum number of files that can be switched to a VPS/context. *Used Files* Number of files used in a VPS/context. 
*TCP Sockets Limit*
    Limit on the number of established connections in a virtual server.

*Established Sockets*
    Number of established connections in a virtual server.

In respect of the limits, when the summary virtual memory size used by the VPS exceeds the Memory Limit, processes can't allocate the required memory and will fail in unexpected ways. The general recommendation is that the Context RSS Limit be set to one third of the Memory Limit.

Some VPS providers however appear to ignore such guidance, perhaps not understanding how virtual memory systems work, and set too restrictive a value on the Memory Limit of the VPS, to the extent that virtual memory use will exceed the Memory Limit even before actual memory use reaches Max RSS Memory, or even perhaps before reaching the Context RSS Limit.

This is especially a problem where the hosted operating system is Linux, as Linux uses a default per thread stack size which is excessive. When using the Apache worker MPM with multiple threads, or mod_wsgi daemon mode and multiple worker threads, the amount of virtual memory quickly adds up, causing the artificial Memory Limit to be exceeded.

Under Linux the default process stack size is 8MB. Whereas other UNIX systems typically use a much smaller per thread stack size in the order of 512KB, Linux inherits the process stack size and also uses it as the per thread stack size.

If you are running a VPS system and are having problems with the Memory Limit being exceeded by the amount of virtual memory set aside by all applications running in the VPS, it will be necessary to override the default per thread stack size as used by Linux.

If you are using the Apache worker MPM, you will need to upgrade to Apache 2.2 if you are not already running it. Having done that you should then use the Apache directive ThreadStackSize to lower the per thread stack size for threads created by Apache for the Apache child processes::

    ThreadStackSize 524288

This should drop the amount of virtual memory being set aside by Apache for its child processes and thus any WSGI application running under embedded mode.

If a WSGI application creates its own threads for performing background activities, it is also preferable that it override the stack size set aside for each such thread. For that you will need to be using at least Python 2.5. The WSGI application should be amended to execute::

    import thread
    thread.stack_size(524288)

If using mod_wsgi daemon mode, you will need to use mod_wsgi 2.0 and override the per thread stack size using the 'stack-size' option to the WSGIDaemonProcess directive::

    WSGIDaemonProcess example stack-size=524288

If you are unable to upgrade to Apache 2.2 and/or mod_wsgi 2.0, the only other option you have for affecting the amount of virtual memory set aside for the stack of each thread is to override the process stack size. If you are using a standard Apache distribution, this can be done by adding to the 'envvars' file for the Apache installation::

    ulimit -s 512

If using a customised Apache installation, such as on RedHat, the 'envvars' file may not exist. In this case you would need to add this into the actual startup script for Apache. For RedHat this is '/etc/sysconfig/httpd'.

Note that although 512KB is given here as an example, you may in practice need to adjust this higher if you are using third party C extension modules for Python which allocate significant amounts of memory on the stack.
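If you want to confirm what stack size limit the process actually ended up with after any 'ulimit' change, the standard 'resource' module can be queried from within the WSGI application. This is only a quick sanity check and the exact values reported will vary between systems::

    import sys
    import resource

    # Returns the (soft, hard) stack size limits in bytes, with
    # resource.RLIM_INFINITY meaning no limit is imposed.
    soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
    print >> sys.stderr, 'stack rlimit: soft=%r hard=%r' % (soft, hard)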
OpenBSD And Thread Stack Size ----------------------------- When using Linux the excessive amount of virtual memory set aside for the stack of each thread can cause problems in memory constrained VPS systems. Under OpenBSD the opposite problem can occur in that the default per thread stack size can be too small. In this situation the same mechanisms as used above for adjusting the amount of virtual memory set aside can be used, but in this case to increase the amount of memory to be greater than the default value. Although it has been reported that the default per thread stack size on OpenBSD can be a problem, it isn't known what it defaults too and thus whether it is too low, or whether it was just the users specific application which was attempting to allocate too much memory from the stack. Python Oracle Wrappers ---------------------- When using WSGIDaemonProcess directive, it is possible to use the 'display-name' option to set what the name of the process is that will be displayed in output from BSD derived 'ps' programs and some other monitoring programs. This allows one to distinguish the WSGI daemon processes in a process group from the normal Apache 'httpd' processes. The mod_wsgi package accepts the magic string '%{GROUP}' as value to the WSGIDaemonProcess directive to indicate that mod_wsgi should construct the name of the processes based on the name of the process group. Specifically, if you have:: WSGIDaemonprocess mygroup display-name=%{GROUP} then the name of the processes in that process group would be set to the value:: (wsgi:mygroup) This generally works fine, however causes a problem when the WSGI application makes use of the 'cx_Oracle' module for wrapping Oracle client libraries in Python. Specifically, Oracle client libraries can produce the error:: ORA-06413: Connection not open. This appears to be caused by the use of brackets, ie., '()' in the name of the process. It is therefore recommended that you explicitly provide the name to use for the process and avoid these characters and potentially any non alphanumeric characters to be extra safe. This issue is briefly mentioned in: * http://www.dba-oracle.com/t_ora_06413_connection_not_open.htm Non Blocking Module Imports --------------------------- In Python 2.6 non blocking module imports were added as part of the Python C API in the form of the function PyImport_ImportModuleNoBlock(). This function was introduced to prevent deadlocks with module imports in certain circumstances. Unfortunately, for valid reasons or not, use of this function has been sprinkled through Python standard library modules as well as third party modules. Although the function may have been created to fix some underlying issue, its usage has caused a new set of problems for multithreaded programs which defer module importing until after threads have been created. With mod_wsgi this is actually the norm as the default mode of operation is that code is lazily loaded only when the first request arrives which requires it. A classic example of the sorts of problems use of this function causes is the error:: ImportError: Failed to import _strptime because the import lock is held by another thread. This particular error occurs when 'time.strptime()' is called for the first time and it so happens that another thread is in the process of doing a module import and holds the global module import lock. It is believed that the fact this can happen indicates that Python is flawed in using the PyImport_ImportModuleNoBlock(). 
Unfortunately, when this issue has been highlighted in the past, people seemed to think it was acceptable and that the only solution, rather than fixing the Python standard library, was to ensure that all module imports are done before any threads are created. This response is frankly crazy and you can expect all manner of random problems related to this to crop up as more and more people start using the PyImport_ImportModuleNoBlock() function without realising that it is a really bad idea in the context of a multithreaded system.

Although no hope is held out for the issue being fixed in Python, a problem report has still been lodged and can be found at:

* http://bugs.python.org/issue8098

The only workaround for the problem is to ensure that all imports of modules on which the PyImport_ImportModuleNoBlock() function is used are done explicitly or indirectly when the WSGI script file is loaded. Thus, to get around the specific case above, add the following into the WSGI script file::

    import _strptime

There is nothing that can be done in mod_wsgi to fix this properly as the set of modules that might have to be forcibly imported is unknown. Having a hack to import them just to avoid the problem is also going to result in unnecessary memory usage if the application didn't actually need them.

========================
Assorted Tips And Tricks
========================

This document contains various tips and tricks related to using mod_wsgi which don't deserve a document of their own or which don't fit within other documentation.

Determining If Running Under mod_wsgi
-------------------------------------

As a WSGI application developer you should always be striving to write portable WSGI applications. That is, you should not write your code so as to be dependent on the specific features of a specific WSGI hosting mechanism. This unfortunately is not always possible, especially when it comes to deployment, due to there being no one blessed way for exposing a WSGI application for hooking into WSGI hosting mechanisms.

There may also be times when you might want to rely on a feature of a specific WSGI hosting mechanism which, although not part of the WSGI specification, allows you to do something you wouldn't otherwise.

That said, there are a few ways in which you can detect that your code is running under mod_wsgi. These fall under two categories. The first is a general mechanism for detecting if mod_wsgi is being used. The second is additional ways to detect that mod_wsgi is being used when a request is being handled.

The simplest way of detecting if mod_wsgi is being used is to import the 'mod_wsgi' module. This is a special module which is installed automatically by the Apache/mod_wsgi module into the set of imported modules, ie., sys.modules. You can thus do::

    try:
        import mod_wsgi
        # Put code here which should only run when mod_wsgi is being used.
    except:
        pass

Do note however that although this module is added automatically, the way mod_wsgi has been implemented allows in the future for there to be a separate Python package/module distinct from the mod_wsgi.so file called 'mod_wsgi' which might contain additional Python code to support use of mod_wsgi.
What would happen if such a separate Python package/module is available is that it will be automatically imported, with additional information set up by the Apache/mod_wsgi module then inserted into the global namespace of that Python package/module.

The potential existence of this distinct Python package/module means that importing 'mod_wsgi' could one day actually succeed outside of code being run under the Apache/mod_wsgi module. A more correct test therefore is::

    try:
        from mod_wsgi import version
        # Put code here which should only run when mod_wsgi is being used.
    except:
        pass

This is different because the 'version' attribute will only be present when running under the Apache/mod_wsgi module, as that version relates to the version of mod_wsgi.so.

The above import check can be used anywhere, be that in the WSGI script file, or in your application code at either global scope or within the context of a specific function.

In the specific case of the WSGI script file, although the above can be used, there is an alternate check that can be made. That is to check the value of the '__name__' attribute given to the WSGI script file when the code is loaded into the Python interpreter.

The normal situation where one would check the value of '__name__' is when wanting to do something different when a Python code file is executed directly against the Python interpreter as opposed to being imported. For example::

    if __name__ == '__main__':
        ...

In contrast, where a Python code file is imported, the '__name__' attribute would be the dotted path which would be used to import the code file. In the case of mod_wsgi, although WSGI script files are imported as if they are a module, because they could exist anywhere and not in locations on the Python module search path, they don't have a conventional dotted path name. Instead they have a magic name built from an md5 hash of the path to the WSGI script file. So as to at least identify this as being related to mod_wsgi, it has the prefix '_mod_wsgi_'.

This means a WSGI script file could use::

    if __name__.startswith('_mod_wsgi_'):
        ...

if it needed to execute different code based on whether the WSGI script file was actually being loaded by the Apache/mod_wsgi module as opposed to being executed directly as a script by the command line Python interpreter. This latter technique obviously only works in the WSGI script file and not elsewhere.

A final method that can be used within the context of the WSGI application handling the request is to interrogate the WSGI environ dictionary passed to the WSGI application. In this case code can look for the presence of the 'mod_wsgi.version' key within the WSGI environ dictionary::

    def application(environ, start_response):
        status = '200 OK'

        if environ.has_key('mod_wsgi.version'):
            output = b'Hello mod_wsgi!'
        else:
            output = b'Hello other WSGI hosting mechanism!'

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        return [output]

==========================
Checking Your Installation
==========================

When debugging mod_wsgi or a WSGI application, it is important to be able to understand how mod_wsgi has been installed, what Apache and/or Python it uses and how those systems have been configured, plus under what configuration the WSGI application is running. This document details various such checks that can be made.
The primary purpose of providing this information is so that when people ask questions on the mod_wsgi mailing list, they can be directed here to perform certain checks as a way of collecting additional information needed to help debug their problem. If you are reading this document because you have been directed here from the mailing list, then ensure that you actually provide the full amount of detail obtained from the checks and not leave out information. When you leave out information then it means guesses have to be made about your setup which makes it harder to debug your problems. Apache Build Information ------------------------ Information related to what version of Apache is being used and how it is built is obtained in a number of ways. The primary means is from the Apache 'httpd' executable itself using command line options. The main such option is the ``-V`` option. On most systems the standard Apache executable supplied with the operating system is located at '/usr/sbin/httpd'. On MacOS X, for the operating system supplied Apache the output from this is:: $ /usr/sbin/httpd -V Server version: Apache/2.2.14 (Unix) Server built: Feb 10 2010 22:22:39 Server's Module Magic Number: 20051115:23 Server loaded: APR 1.3.8, APR-Util 1.3.9 Compiled using: APR 1.3.8, APR-Util 1.3.9 Architecture: 64-bit Server MPM: Prefork threaded: no forked: yes (variable process count) Server compiled with.... -D APACHE_MPM_DIR="server/mpm/prefork" -D APR_HAS_SENDFILE -D APR_HAS_MMAP -D APR_HAVE_IPV6 (IPv4-mapped addresses enabled) -D APR_USE_FLOCK_SERIALIZE -D APR_USE_PTHREAD_SERIALIZE -D SINGLE_LISTEN_UNSERIALIZED_ACCEPT -D APR_HAS_OTHER_CHILD -D AP_HAVE_RELIABLE_PIPED_LOGS -D DYNAMIC_MODULE_LIMIT=128 -D HTTPD_ROOT="/usr" -D SUEXEC_BIN="/usr/bin/suexec" -D DEFAULT_PIDLOG="/private/var/run/httpd.pid" -D DEFAULT_SCOREBOARD="logs/apache_runtime_status" -D DEFAULT_LOCKFILE="/private/var/run/accept.lock" -D DEFAULT_ERRORLOG="logs/error_log" -D AP_TYPES_CONFIG_FILE="/private/etc/apache2/mime.types" -D SERVER_CONFIG_FILE="/private/etc/apache2/httpd.conf" The most important details here are: * The version of Apache from the 'Server version' entry. * The MPM which Apache has been compiled to use from the 'Server MPM' entry. Although this has a section which appears to indicate what preprocessor options the server was compiled with, it is a massaged list. What is often more useful is the actual arguments which were supplied to the 'configure' command when Apache was built. To determine this information you need to do the following. * Work out where 'apxs2' or 'apxs' is installed. * Open this file and find setting for '$installbuilddir'. * Open the 'config.nice' file in the directory specified for build directory. On MacOS X, for the operating system supplied Apache this file is located at '/usr/share/httpd/build/config.nice'. The contents of the file is:: #! /bin/sh # # Created by configure "/SourceCache/apache/apache-747.1/httpd/configure" \ "--prefix=/usr" \ "--enable-layout=Darwin" \ "--with-apr=/usr" \ "--with-apr-util=/usr" \ "--with-pcre=/usr/local/bin/pcre-config" \ "--enable-mods-shared=all" \ "--enable-ssl" \ "--enable-cache" \ "--enable-mem-cache" \ "--enable-proxy-balancer" \ "--enable-proxy" \ "--enable-proxy-http" \ "--enable-disk-cache" \ "$@" Not only does this indicate what features of Apache have been compiled in, it also indicates by way of the ``--enable-layout`` option what custom Apache installation layout has been used. 
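If you find yourself collecting these details often, for example when putting
together a report for the mailing list, they can also be captured
programmatically. The following is only an illustrative sketch, not part of
mod_wsgi itself; it assumes the Apache executable lives at '/usr/sbin/httpd',
so adjust the path (and the program name, which may be 'apache2' on some
systems) to match your installation::

    import subprocess

    HTTPD = '/usr/sbin/httpd'   # Adjust to match your installation.

    def apache_build_info():
        # Capture the output of 'httpd -V' as a single string.
        output = subprocess.run([HTTPD, '-V'], capture_output=True,
                                text=True, check=True).stdout

        details = {}
        for line in output.splitlines():
            # Entries of interest look like 'Server version: Apache/2.2.14 (Unix)'.
            if ':' in line and not line.lstrip().startswith('-D'):
                key, _, value = line.partition(':')
                details[key.strip()] = value.strip()
        return details

    if __name__ == '__main__':
        info = apache_build_info()
        print(info.get('Server version'))
        print(info.get('Server MPM'))

Run against the MacOS X system Apache shown above, this would report the
'Apache/2.2.14 (Unix)' server version and the 'Prefork' MPM.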
Apache Modules Loaded --------------------- Modules can be loaded into Apache statically, or can be loaded dynamically at run time based on Apache configuration files. If modules have been statically compiled into Apache, usually it would be evident by what 'configure' arguments have been used when Apache was built. To verify exactly what is compiled statically, you can use the ``-l`` option to the Apache executable. On MacOS X, for the operating system supplied Apache the output from running ``-l`` option is:: $ /usr/sbin/httpd -l Compiled in modules: core.c prefork.c http_core.c mod_so.c This indicates that the only module that is loaded statically is 'mod_so'. This is actually the Apache module that handles the task of dynamically loading other Apache modules. For a specific Apache configuration, you can determine what Apache modules will be loaded dynamically by using the ``-M`` option for the Apache executable. On MacOS X, for the operating system supplied Apache the output from running ``-M`` option, where the only additional module added is mod_wsgi, is:: $ /usr/sbin/httpd -M Loaded Modules: core_module (static) mpm_prefork_module (static) http_module (static) so_module (static) authn_file_module (shared) authn_dbm_module (shared) authn_anon_module (shared) authn_dbd_module (shared) authn_default_module (shared) authz_host_module (shared) authz_groupfile_module (shared) authz_user_module (shared) authz_dbm_module (shared) authz_owner_module (shared) authz_default_module (shared) auth_basic_module (shared) auth_digest_module (shared) cache_module (shared) disk_cache_module (shared) mem_cache_module (shared) dbd_module (shared) dumpio_module (shared) ext_filter_module (shared) include_module (shared) filter_module (shared) substitute_module (shared) deflate_module (shared) log_config_module (shared) log_forensic_module (shared) logio_module (shared) env_module (shared) mime_magic_module (shared) cern_meta_module (shared) expires_module (shared) headers_module (shared) ident_module (shared) usertrack_module (shared) setenvif_module (shared) version_module (shared) proxy_module (shared) proxy_connect_module (shared) proxy_ftp_module (shared) proxy_http_module (shared) proxy_ajp_module (shared) proxy_balancer_module (shared) ssl_module (shared) mime_module (shared) dav_module (shared) status_module (shared) autoindex_module (shared) asis_module (shared) info_module (shared) cgi_module (shared) dav_fs_module (shared) vhost_alias_module (shared) negotiation_module (shared) dir_module (shared) imagemap_module (shared) actions_module (shared) speling_module (shared) userdir_module (shared) alias_module (shared) rewrite_module (shared) bonjour_module (shared) wsgi_module (shared) Syntax OK The names reflect that which would have been used with the LoadModule line in the Apache configuration and not the name of the module file itself. The order in which modules are listed can be important in some cases where a module doesn't explicitly designate in what order a handler should be applied relative to other Apache modules. Global Accept Mutex ------------------- Because Apache is a multi process server, it needs to use a global cross process mutex to control which of the Apache child processes get the next chance to accept a connection from a HTTP client. This cross process mutex can be implemented using a variety of different mechanisms and exactly which is used can vary based on the operating system. 
Which mechanism is used can also be overridden in the Apache configuration if absolutely required. A simlar instance of a cross process mutex is also used for each mod_wsgi daemon process group to mediate which process in the daemon process group gets to accept the next request proxied to that daemon process group via the Apache child processes. The list of mechanisms which might be used to implement the cross process mutex are as follows: * flock * fcntl * sysvsem * posixsem * pthread In the event that there are issues which communicating between the Apache child processes and the mod_wsgi daemon process in particular, it can be useful to know what mechanism is used to implement the cross process mutex. By default, the Apache configuration files would not specify a specific mechanism, and instead which is used would be automatically selected by the underlying Apache runtime libraries based on various build time and system checks about what is the prefered mechanism for a particular operating system. Which mechanism is used by default can be determined from the build information displayed by the ``-V`` option to the Apache executable described previously. The particular entries of interest are those with 'SERIALIZE' in the name of the macro. On MacOS X, using operating system supplied Apache, the entries of interest are:: -D APR_USE_FLOCK_SERIALIZE -D APR_USE_PTHREAD_SERIALIZE As the entries are used in order, what this indicates is that Apache will by default use the 'flock' mechanism to implement the cross process mutex. In comparison, on a Linux system, the entries of interest may be:: -D APR_USE_SYSVSEM_SERIALIZE -D APR_USE_PTHREAD_SERIALIZE which indicates that 'sysvsem' mechanism is instead used. This mechanism is also what would be used by mod_wsgi by default as well for the cross process mutex for daemon process groups. This mechanism will be different where the AcceptMutex and WSGIAcceptMutex directives are used. If the AcceptMutex directive is defined in the Apache configuration file, then what ever mechanism is specified will be used instead for Apache child processes. Provided that Apache 2.2 or older is used, and WSGIAcceptMutex is not specified, then when AcceptMutex is used, that will also then be used by mod_wsgi daemon processes as well. In the case of Apache 2.4 and later, AcceptMutex will no longer override the default for mod_wsgi daemon process groups, and instead WSGIAcceptMutex must be specified seperately if it needs to be overridden for both. Either way, you should check the Apache configuration files as to whether either AcceptMutex or WSGIAcceptMutex directives are used as they will override the defaults calculated above. Under normal circumstances neither should be set as default would always be used. If wanting to look at overriding the default mechanism, what options exist for what mechanism can be used will be dependent on the operating system being used. There are a couple of ways this can be determined. The first is to find the 'apr.h' header file from the Apache runtime library installation that Apache was compiled against. In that you will find entries similar to the 'USE' macros above. You will also find 'HAS' entries. In this case we are interested in the 'HAS' entries. 
On MacOS X, with the operating system supplied APR library, the entries in 'apr.h' are:: #define APR_HAS_FLOCK_SERIALIZE 1 #define APR_HAS_SYSVSEM_SERIALIZE 1 #define APR_HAS_POSIXSEM_SERIALIZE 1 #define APR_HAS_FCNTL_SERIALIZE 1 #define APR_HAS_PROC_PTHREAD_SERIALIZE 0 The available mechanisms are those defined to be '1'. Finding where the right 'apr.h' is located may be tricky, so an easier way is to trick Apache into generating an error message to list what the available mechanisms are. To do this, in turn, add entries into the Apache configuration files, at global scope of:: AcceptMutex xxx and:: WSGIAcceptMutex xxx For each run the ``-t`` option on the Apache program executable. On MacOS X, with the operating system supplied APR library, this yields:: $ /usr/sbin/httpd -t Syntax error on line 501 of /private/etc/apache2/httpd.conf: xxx is an invalid mutex mechanism; Valid accept mutexes for this platform \ and MPM are: default, flock, fcntl, sysvsem, posixsem. for AcceptMutex and for WSGIAcceptMutex:: $ /usr/sbin/httpd -t Syntax error on line 501 of /private/etc/apache2/httpd.conf: Accept mutex lock mechanism 'xxx' is invalid. Valid accept mutex mechanisms \ for this platform are: default, flock, fcntl, sysvsem, posixsem. The list of available mechanisms should normally be the same in both cases. Using the value of 'default' indicates that which mechanism is used is left up to the APR library. Python Shared Library --------------------- When mod_wsgi is built, the 'mod_wsgi.so' file should be linked against Python via a shared library. If it isn't and it is linked against a static library, various issues can arise. These include additional memory usage, plus conflicts with mod_python if it is also loaded in same Apache. To validate that 'mod_wsgi.so' is using a shared library for Python, on most UNIX systems the 'ldd' command is used. For example:: $ ldd mod_wsgi.so linux-vdso.so.1 => (0x00007fffeb3fe000) libpython2.5.so.1.0 => /usr/local/lib/libpython2.5.so.1.0 (0x00002adebf94d000) libpthread.so.0 => /lib/libpthread.so.0 (0x00002adebfcba000) libdl.so.2 => /lib/libdl.so.2 (0x00002adebfed6000) libutil.so.1 => /lib/libutil.so.1 (0x00002adec00da000) libc.so.6 => /lib/libc.so.6 (0x00002adec02dd000) libm.so.6 => /lib/libm.so.6 (0x00002adec0635000) /lib64/ld-linux-x86-64.so.2 (0x0000555555554000) What you want to see is a reference to an instance of 'libpythonX.Y.so'. Normally the operating system shared library version suffix would always be '1.0'. What it is shouldn't really matter though. This reference should refer to the actual Python shared library for your Python installation. Do note though, that 'ldd' will take into consideration any local user setting of the 'LD_LIBRARY_PATH' environment variable. That is, 'ldd' will also search any directories listed in that environment variable for shared libraries. Although that environment variable may be defined in your user account, it will not normally be defined in the environment of the account that Apache starts up as. Thus, it is important that you unset the 'LD_LIBRARY_PATH' environment variable when running 'ldd'. If you run the check with and without 'LD_LIBRARY_PATH' set and find that without it that a different, or no Python shared library is found, then you will likely have a problem. For the case of it not being found, Apache will fail to start. 
For where it is found but it is a different installation to that which you want used, subtle problems could occur due to C extension modules for Python being used which were compiled against that installation. For example, if 'LD_LIBRARY_PATH' contained the directory '/usr/local/lib' and you obtained the results above, but when you unset it, it picked up shared library from '/usr/lib' instead, then you may end up with problems if for a different installation. In this case you would see:: $ unset LD_LIBRARY_PATH $ ldd mod_wsgi.so linux-vdso.so.1 => (0x00007fffeb3fe000) libpython2.5.so.1.0 => /usr/lib/libpython2.5.so.1.0 (0x00002adebf94d000) libpthread.so.0 => /lib/libpthread.so.0 (0x00002adebfcba000) libdl.so.2 => /lib/libdl.so.2 (0x00002adebfed6000) libutil.so.1 => /lib/libutil.so.1 (0x00002adec00da000) libc.so.6 => /lib/libc.so.6 (0x00002adec02dd000) libm.so.6 => /lib/libm.so.6 (0x00002adec0635000) /lib64/ld-linux-x86-64.so.2 (0x0000555555554000) Similarly, if not found at all, you would see:: $ unset LD_LIBRARY_PATH $ ldd mod_wsgi.so linux-vdso.so.1 => (0x00007fffeb3fe000) libpython2.5.so.1.0 => not found libpthread.so.0 => /lib/libpthread.so.0 (0x00002adebfcba000) libdl.so.2 => /lib/libdl.so.2 (0x00002adebfed6000) libutil.so.1 => /lib/libutil.so.1 (0x00002adec00da000) libc.so.6 => /lib/libc.so.6 (0x00002adec02dd000) libm.so.6 => /lib/libm.so.6 (0x00002adec0635000) /lib64/ld-linux-x86-64.so.2 (0x0000555555554000) If you have this problem, then it would be necessary to set 'LD_RUN_PATH' environment variable to include directory containing where Python library resides when building mod_wsgi, or set 'LD_LIBRARY_PATH' in startup file for Apache such that it is also set for Apache when run. For standard Apache installation the latter would be done in 'envvars' file in same directory as Apache program executable. For some Linux installations would need to be done in init scripts for Apache. Note that MacOS X doesn't use 'LD_LIBRARY_PATH' nor have 'ldd'. On MacOS X, instead of 'ldd' you can use 'otool -L':: $ otool -L mod_wsgi.so mod_wsgi.so: /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 125.2.0) /System/Library/Frameworks/Python.framework/Versions/2.6/Python (compatibility version 2.6.0, current version 2.6.1) If using standard MacOS X compilers and not using Fink or !MacPorts, there generally should not ever be any issues with whether it is a shared library or not as everything should just work. The only issue with MacOS X is that for whatever reason, the location dependency for the shared library (framework) isn't always encoded into 'mod_wsgi.so' correctly. This seems to vary between what Python installation was used and what MacOS X operating system version. In this case, if multiple installations of same version of Python in different locations, may find the system installation rather than your custom installation. In that situation you may need to use the ``--disable-framework`` option to 'configure' script for mod_wsgi. This doesn't actually disable use of the framework, but does change how it links to use a more traditional library style linking rather than framework linking. This seems to resolve the problems in most cases. Python Installation In Use -------------------------- Although the 'mod_wsgi.so' file may be finding a specific Python shared library and that may be from the correct installation, the Python library when initialised doesn't actually know from where it came. 
As such, it uses a series of checks to try and determine where the Python
installation is actually located. This check has various subtleties and how
it works varies depending on the platform used.

At its simplest though, on most UNIX systems it will check all directories
listed in the 'PATH' environment variable of the process. In each of those
directories it will look for the 'python' program. When it finds such a
file, it will then look for a corresponding 'lib' directory containing a
valid Python installation for the same version of Python as is being run.
When it finds such a directory, the home for the Python installation will be
taken as being the parent directory of the directory containing the 'python'
program file found.

Because this search is dependent on the 'PATH' environment variable, which
is likely set to a minimal set of directories for the Apache user, then if
you are using a Python installation in a non standard location, it may not
properly find the location of that installation.

The easiest way to validate which Python installation is being used is to
use a test WSGI script to output the value of 'sys.prefix'::

    import sys

    def application(environ, start_response):
        status = '200 OK'

        output = u''
        output += u'sys.version = %s\n' % repr(sys.version)
        output += u'sys.prefix = %s\n' % repr(sys.prefix)

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        return [output.encode('UTF-8')]

For standard Python installation on a Linux system, this would produce
something like::

    sys.version = "'2.6.1 (r261:67515, Feb 11 2010, 00:51:29) \\n[GCC 4.2.1 (Apple Inc. build 5646)]'"
    sys.prefix = '/usr'

Thus, if you were expecting to pick up a separate Python installation
located under '/usr/local' or elsewhere, this would be indicative of a
problem.

It can also be worthwhile to check that the Python module search path also
looks correct. This can be done by using a test WSGI script to output the
value of 'sys.path'::

    import sys

    def application(environ, start_response):
        status = '200 OK'

        output = u'sys.path = %s' % repr(sys.path)

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        return [output.encode('UTF-8')]

In both cases, even if an incorrect location is being used for the Python
installation and even if there is no actual Python installation of the
correct version under that root directory, these test scripts should still
run, as the 'sys' module is a builtin module which can be satisfied via just
the Python library.

If debugging whether there is a Python installation underneath that root
directory, the subdirectory which you would want to look for is
'lib/pythonX.Y' corresponding to the version of Python being used.

If the calculated directory is wrong, then you will need to use the
WSGIPythonHome directive to set the location to the correct value. The value
to use is what 'sys.prefix' is set to when the correct Python is run from
the command line and its value printed::

    >>> import sys
    >>> print(sys.prefix)
    /usr/local

Thus for the case where it is installed under '/usr/local', one would use::

    WSGIPythonHome /usr/local

Embedded Or Daemon Mode
-----------------------

WSGI applications can run in either embedded mode or daemon mode. In the
case of embedded mode, the WSGI application runs within the Apache child
processes themselves. In the case of daemon mode, they run within a separate
set of processes managed by mod_wsgi.
To determine what mode a WSGI application is running under, replace its WSGI script with the test WSGI script as follows:: import sys def application(environ, start_response): status = '200 OK' output = u'mod_wsgi.process_group = %s' % repr(environ['mod_wsgi.process_group']) response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output.encode('UTF-8')] If the configuration is such that the WSGI application is running in embedded mode, then you will see:: mod_wsgi.process_group = '' This actually corresponds to the directive:: WSGIProcessGroup %{GLOBAL} having being used, or the same value being used to the 'process-group' directive of WSGIScriptAlias. Do note though that these are also actually the defaults for these if not explicitly defined. If the WSGI application is actually running in daemon mode, then a non empty string will instead be shown corresponding to the name of the daemon process group used. Sub Interpreter Being Used -------------------------- As well as WSGI application being able to be delegated to run in either embedded mode or daemon mode, within the process it ends up running in, it can be delegated to a specific Python sub interpreter. To determine which Python sub interpreter is being used within the process the WSGI application is being run use the test WSGI script of:: import sys def application(environ, start_response): status = '200 OK' output = u'mod_wsgi.application_group = %s' % repr(environ['mod_wsgi.application_group']) response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output.encode('UTF-8')] If being run in the main interpreter, ie., the first interpreter created by Python, this will output:: mod_wsgi.application_group = '' This actually corresponds to the directive:: WSGIApplicationGroup %{GLOBAL} having being used, or the same value being used to the 'application-group' directive of WSGIScriptAlias. The default for these if not defined is actually '%{RESOURCE}'. This will be a value made up from the name of the virtual host or server, the port on which connection was accepted and the mount point of the WSGI application. The port however is actually dropped where port is 80 or 443. An example of what you would expect to see is:: mod_wsgi.application_group = 'tests.example.com|/interpreter.wsgi' This corresponds to server name of 'tests.example.com' with connection received on either port 80 or 443 and where WSGI application was mounted at the URL of '/interpreter.wsgi'. Single Or Multi Threaded ------------------------ Apache supports differing Multiprocessing Modules (MPMs) having different attributes. One such difference is whether a specific Apache child process uses multiple threads for handling requests or whether a single thread is instead used. Depending on how you configure a daemon process group when using daemon mode will also dictate whether single or multithreaded. By default, if number of threads is not explicitly specified for a daemon process group, it will be multithreaded. Whether a WSGI application is executing within a multithreaded environment is important to know. If it is, then you need to ensure that your own code and any framework you are using is also thread safe. 
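In practice, thread safety means that any mutable data held at module scope
and updated while handling requests must be protected against concurrent
access. The following sketch only illustrates the pattern; the counter and
lock are hypothetical names used for the example and are not something
mod_wsgi itself provides::

    import threading

    # Module level state is shared by all request handler threads within
    # this process, so updates must be serialised with a lock.
    _count_lock = threading.Lock()
    _request_count = 0

    def application(environ, start_response):
        global _request_count

        with _count_lock:
            _request_count += 1
            count = _request_count

        output = ('Requests handled by this process: %d' % count).encode('UTF-8')

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response('200 OK', response_headers)

        return [output]

Note that such state is only shared within a single process; in a multi
process configuration each process will maintain its own copy.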
A test WSGI script for validating whether WSGI application running in multithread configuration is as follows:: import sys def application(environ, start_response): status = '200 OK' output = u'wsgi.multithread = %s' % repr(environ['wsgi.multithread']) response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output.encode('UTF-8')] If multithreaded, this will yield:: wsgi.multithread = True Multithreaded would usually be true on Windows, on UNIX if running in embedded mode and worker MPM is used by Apache, or if using daemon mode and number of threads not explicitly set, or number of threads explicitly set to value other than '1'. mod_wsgi-5.0.0/docs/user-guides/configuration-guidelines.rst000066400000000000000000000775151452636074700242740ustar00rootroot00000000000000======================== Configuration Guidelines ======================== The purpose of this document is to detail the basic configuration steps required for running WSGI applications with mod_wsgi. The WSGIScriptAlias Directive ----------------------------- Configuring Apache to run WSGI applications using mod_wsgi is similar to how Apache is configured to run CGI applications. To streamline this task however, an additional configuration directive called WSGIScriptAlias is provided. Like the ScriptAlias directive for CGI scripts, the mod_wsgi directive combines together a number of steps so as to reduce the amount of configuration required. The first way of using the WSGIScriptAlias directive to indicate the WSGI application to be used, is to associate a WSGI application against a specific URL prefix:: WSGIScriptAlias /myapp /usr/local/wsgi/scripts/myapp.wsgi The last option to the directive in this case must be a full pathname to the actual code file containing the WSGI application. A trailing slash should never be added to the last option when it is referring to an actual file. The WSGI application contained within the code file specified should be called 'application'. For example:: def application(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] Note that an absolute pathname to a WSGI script file must be provided. It is not possible to specify an application by Python module name alone. A full path is used for a number of reasons, the main one being so that all the Apache access controls can still be applied to indicate who can actually access the WSGI application. Because these access controls will apply, if the WSGI application is located outside of any directories already known to Apache, it will be necessary to tell Apache that files within that directory can be used. To do this the Directory directive must be used:: Order allow,deny Allow from all = 2.4> Require all granted Note that Apache access control directives such as Order and Allow, or Require in the case of Apache 2.4 or newer, should nearly always be applied to Directory and never to a Location. Adding them to a Location would not be regarded as best practice and would potentially weaken the security of your Apache server, especially where the Location was for '/'. As for CGI scripts and the ScriptAlias directive, it is not necessary to have used the Options directive to enable the ExecCGI directive. 
This is because it is automatically implied from the use of the WSGIScriptAlias directive that the script must be executable. For WSGIScriptAlias, to mount a WSGI application at the root of the web site, simply use '/' as the mount point:: WSGIScriptAlias / /usr/local/wsgi/scripts/myapp.wsgi If you need to mount multiple WSGI applications, the directives can be listed more than once. When this occurs, those occuring first are given precedence. As such, those which are mounted at what would be a sub URL to another WSGI application, should always be listed earlier:: WSGIScriptAlias /wiki /usr/local/wsgi/scripts/mywiki.wsgi WSGIScriptAlias /blog /usr/local/wsgi/scripts/myblog.wsgi WSGIScriptAlias / /usr/local/wsgi/scripts/myapp.wsgi The second way of using the WSGIScriptAlias directive is to use it to map to a directory containing any number of WSGI applications:: WSGIScriptAlias /wsgi/ /usr/local/wsgi/scripts/ When this is used, the next part of the URL after the URL prefix is used to identify which WSGI application script file within the target directory should be used. Both the mount point and the directory path must have a trailing slash. If you want WSGI application scripts to use an extension, but don't wish to have that extension appear in the URL, then it is possible to use the WSGIScriptAliasMatch directive instead:: WSGIScriptAliasMatch ^/wsgi/([^/]+) /usr/local/wsgi/scripts/$1.wsgi In this case, any path information appearing after the URL prefix, will be mapped to a corresponding WSGI script file in the directory, but with a '.wsgi' extension. The extension would though not need to be included in the URL. In all ways that the WSGIScriptAlias can be used, the target script is not required to have any specific extension type and in particular it is not necessary to use a '.py' extension just because it contains Python code. Because the target script is not treated exactly like a traditional Python module, if an extension is used, it is recommended that '.wsgi' be used rather than '.py'. The Apache Alias Directive -------------------------- Although the WSGIScriptAlias directive is provided, the traditional Alias directive can still be used to enable execution of WSGI applications for specific URLs. The equivalent such configuration for:: WSGIScriptAlias /wsgi/ /usr/local/wsgi/scripts/ Order allow,deny Allow from all = 2.4> Require all granted using the Alias directive would be:: Alias /wsgi/ /usr/local/wsgi/scripts/ Options ExecCGI SetHandler wsgi-script Order allow,deny Allow from all = 2.4> Require all granted The additional steps required in this case are to enable the ability to execute CGI like scripts using the Options directive and define the Apache handler as 'wsgi-script'. If wishing to hold a mixture of static files, normal CGI scripts and WSGI applications within the one directory, the AddHandler directive can be used instead of the SetHandler directive to distinguish between the various resource types based on resource extension:: Alias /wsgi/ /usr/local/wsgi/scripts/ Options ExecCGI AddHandler cgi-script .cgi AddHandler wsgi-script .wsgi Order allow,deny Allow from all = 2.4> Require all granted For whatever extension you use to identify a WSGI script file, ensure that you do not have a conflicting definition for that extension marking it as a CGI script file. For example, if you previously had all '.py' files being handled as 'cgi-script', consider disabling that before marking '.py' file as then being handled as 'wsgi-script' file in same context. 
If both are defined in same context, which is used will depend on the order of the directives and the wrong handler may be selected. Because an extension is required to determine whether a script should be processed as a CGI script versus a WSGI application, the extension would need to appear in the URL. If this is not desired, then add the MultiViews option and MultiviewsMatch directive:: Alias /wsgi/ /usr/local/wsgi/scripts/ Options ExecCGI MultiViews MultiviewsMatch Handlers AddHandler cgi-script .cgi AddHandler wsgi-script .wsgi Order allow,deny Allow from all = 2.4> Require all granted Adding of MultiViews in this instance and allowing multiviews to match Apache handlers will allow the extension to be dropped from the URL. Provided that for each resource there is only one alternative, Apache will then automatically select either the CGI script or WSGI application as appropriate for that resource. Use of multiviews in this way would make it possible to transparently migrate from CGI scripts to WSGI applications without the need to change any URLs. A benefit of using the AddHandler directive as described above, is that it also allows a directory index page or directory browsing to be enabled for the directory. To enable directory browsing add the Indexes option:: Alias /wsgi/ /usr/local/wsgi/scripts/ Options ExecCGI Indexes AddHandler cgi-script .cgi AddHandler wsgi-script .wsgi Order allow,deny Allow from all = 2.4> Require all granted If a directory index page is enabled, it may refer to either a static file, CGI or WSGI application. The DirectoryIndex directive should be used to designate what should be used for the index page:: Alias /wsgi/ /usr/local/wsgi/scripts/ Options ExecCGI Indexes DirectoryIndex index.html index.wsgi index.cgi AddHandler cgi-script .cgi AddHandler wsgi-script .wsgi Order allow,deny Allow from all = 2.4> Require all granted Using AddHandler or SetHandler to configure a WSGI application can also be done from within the '.htaccess' file located within the directory which a URL maps to. This will however only be possible where the directory has been enabled to allow these directives to be used. This would be done using the AllowOverride directive and enabling FileInfo for that directory. It would also be necessary to allow the execution of scripts using the Options directive by listing ExecCGI:: Alias /site/ /usr/local/wsgi/site/ AllowOverride FileInfo Options ExecCGI MultiViews Indexes MultiviewsMatch Handlers Order allow,deny Allow from all = 2.4> Require all granted This done, the '.htaccess' file could then contain:: DirectoryIndex index.html index.wsgi index.cgi AddHandler cgi-script .cgi AddHandler wsgi-script .wsgi Note that the DirectoryIndex directive can only be used to designate a simple WSGI application which returns a single page for when the URL maps to the actual directory. Because the DirectoryIndex directive is not applied when the URL has additional path information beyond the leading portion of the URL which mapped to the directory, it cannot be used as a means of making a complex WSGI application responding to numerous URLs appear at the root of a server. When using the AddHandler directive, with WSGI applications identified by the extension of the script file, the only way to make the WSGI application appear as the root of the server is to perform on the fly rewriting of the URL internal to Apache using mod_rewrite. 
The required rules for mod_rewrite to ensure that a WSGI application, implemented by the script file 'site.wsgi' in the root directory of the virtual host, appears as being mounted on the root of the virtual host would be:: RewriteEngine On RewriteCond %{REQUEST_FILENAME} !-f RewriteRule ^(.*)$ /site.wsgi/$1 [QSA,PT,L] Do note however that when the WSGI application is executed for a request the 'SCRIPT_NAME' variable indicating what the mount point of the application was will be '/site.wsgi'. This will mean that when a WSGI application constructs an absolute URL based on 'SCRIPT_NAME', it will include 'site.wsgi' in the URL rather than it being hidden. As this would probably be undesirable, many web frameworks provide an option to override what the value for the mount point is. If such a configuration option isn't available, it is just as easy to adjust the value of 'SCRIPT_NAME' in the 'site.wsgi' script file itself:: def _application(environ, start_response): # The original application. ... import posixpath def application(environ, start_response): # Wrapper to set SCRIPT_NAME to actual mount point. environ['SCRIPT_NAME'] = posixpath.dirname(environ['SCRIPT_NAME']) if environ['SCRIPT_NAME'] == '/': environ['SCRIPT_NAME'] = '' return _application(environ, start_response) This wrapper will ensure that 'site.wsgi' never appears in the URL as long as it wasn't included in the first place and that access was always via the root of the web site instead. Application Configuration ------------------------- If it is necessary or desired to be able to pass configuration information through to a WSGI application from the Apache configuration file, then the SetEnv directive can be used:: WSGIScriptAlias / /usr/local/wsgi/scripts/demo.wsgi SetEnv demo.templates /usr/local/wsgi/templates SetEnv demo.mailhost mailhost SetEnv demo.debugging 0 Any such variables added using the SetEnv directive will be automatically added to the WSGI environment passed to the application when executed. Note that the WSGI environment is passed upon each request to the application in the 'environ' argument of the application object. This environment is totally unrelated to the process environment which is kept in 'os.environ'. The SetEnv directive has no effect on 'os.environ' and there is no way through Apache configuration directives to affect what is in the process environment. If needing to dynamically set variables based on some aspects of the request itself, the RewriteRule directive may also be useful in some cases as an avenue to set application configuration variables. For example, to enable additional debug only when the client is connecting from the localhost, the following might be used:: SetEnv demo.debugging 0 RewriteEngine On RewriteCond %{REMOTE_ADDR} ^127.0.0.1$ RewriteRule . - [E=demo.debugging:1] More elaborate schemes involving RewriteMap could also be employed. Where SetEnv and RewriteRule are insufficient, then any further application configuration should be injected into an application using a WSGI application wrapper within the WSGI application script file:: def _application(environ, start_response): ... def application(environ, start_response): if environ['REMOTE_ADDR'] in ['127.0.0.1']: environ['demo.debugging'] = '1' return _application(environ, start_response) User Authentication ------------------- As is the case when using CGI scripts with Apache, authorisation headers are not passed through to WSGI applications. 
This is the case, as doing so could leak information about passwords through to a WSGI application which should not be able to see them when Apache is performing authorisation. Unlike CGI scripts however, when using mod_wsgi, the WSGIPassAuthorization directive can be used to control whether HTTP authorisation headers are passed through to a WSGI application in the ``HTTP_AUTHORIZATION`` variable of the WSGI application environment when the equivalent HTTP request headers are present. This option would need to be set to ``On`` if the WSGI application was to handle authorisation rather than Apache doing it:: WSGIPassAuthorization On If Apache is performing authorisation and not the WSGI application, a WSGI application can still find out what type of authorisation scheme was used by checking the variable ``AUTH_TYPE`` of the WSGI application environment. The login name of the authorised user can be determined by checking the variable ``REMOTE_USER``. Hosting Of Static Files ----------------------- When the WSGIScriptAlias directive is used to mount an application at the root of the web server for a host, all requests for that host will be processed by the WSGI application. If is desired for performance reasons to still use Apache to host static files associated with the application, then the Alias directive can be used to designate the files and directories which should be served in this way:: Alias /robots.txt /usr/local/wsgi/static/robots.txt Alias /favicon.ico /usr/local/wsgi/static/favicon.ico AliasMatch /([^/]*\.css) /usr/local/wsgi/static/styles/$1 Alias /media/ /usr/local/wsgi/static/media/ Order allow,deny Allow from all = 2.4> Require all granted WSGIScriptAlias / /usr/local/wsgi/scripts/myapp.wsgi Order allow,deny Allow from all = 2.4> Require all granted When listing the directives, list those for more specific URLs first. In practice this shouldn't actually be required as the Alias directive should take precedence over WSGIScriptAlias, but good practice all the same. Do note though that if using Apache 1.3, the Alias directive will only take precedence over WSGIScriptAlias if the mod_wsgi module is loaded prior to the mod_alias module. To ensure this, the LoadModule/AddModule directives are used. Note that there is never a need to use SetHandler to reset the Apache content handler back to 'None' for URLs mapped to static files. That this is a requirement for mod_python is a short coming in mod_python, do not do the same thing for mod_wsgi. Limiting Request Content ------------------------ By default Apache does not limit the amount of data that may be pushed to the server via a HTTP request such as a POST. That this is the case means that malicious users could attempt to overload a server by attempting to upload excessively large amounts of data. If a WSGI application is not designed properly and doesn't limit this itself in some way, and attempts to load the whole request content into memory, it could cause an application to exhaust available memory. If it is unknown if a WSGI application properly protects itself against such attempts to upload excessively large amounts of data, then the Apache LimitRequestBody directive can be used:: LimitRequestBody 1048576 The argument to the LimitRequestBody should be the maxumum number of bytes that should be allowed in the content of a request. When this directive is used, mod_wsgi will perform the check prior to actually passing a request off to a WSGI application. 
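Independent of the Apache directive, a WSGI application can also apply its
own guard by checking the declared request content length before reading any
of the request body. The following wrapper is only a sketch; it assumes that
checking the 'CONTENT_LENGTH' value supplied with the request is sufficient
and does not attempt to deal with chunked request content::

    MAX_REQUEST_BODY = 1048576    # Bytes. Illustrative value only.

    def _application(environ, start_response):
        # The original application.
        ...

    def application(environ, start_response):
        try:
            content_length = int(environ.get('CONTENT_LENGTH') or 0)
        except ValueError:
            content_length = 0

        if content_length > MAX_REQUEST_BODY:
            # Refuse the request without reading any of the request content.
            output = b'Request content too large.'
            response_headers = [('Content-type', 'text/plain'),
                                ('Content-Length', str(len(output)))]
            start_response('413 Request Entity Too Large', response_headers)
            return [output]

        return _application(environ, start_response)

Whether or not an application level check like this exists, the behaviour of
the LimitRequestBody directive itself is as described below.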
When the limit is exceeded mod_wsgi will immediately return the HTTP 413 error response without even invoking the WSGI application to handle the request. Any request content will not be read as the client connection will then be closed. Note that the HTTP 413 error response page will be that defined by Apache, or as specified by the Apache ErrorDocument directive for that error type. Defining Application Groups --------------------------- By default each WSGI application is placed into its own distinct application group. This means that each application will be given its own distinct Python sub interpreter to run code within. Although this means that applications will be isolated and cannot in general interfere with the Python code components of each other, each will load its own copy of all Python modules it requires into memory. If you have many applications and they use a lot of different Python modules this can result in large process sizes. To avoid large process sizes, if you know that applications within a directory can safely coexist and run together within the same Python sub interpreter, you can specify that all applications within a certain context should be placed in the same application group. This is indicated by using the WSGIApplicationGroup directive:: WSGIApplicationGroup admin-scripts Order allow,deny Allow from all = 2.4> Require all granted The argument to the WSGIApplicationGroup directive can in general be any unique name of your choosing, although there are also a number of special values which you can use as well. For further information about these special values see the more detailed documentation on the :doc:`../configuration-directives/WSGIApplicationGroup` directive. Two of the special values worth highlighting are: **%{GLOBAL}** The application group name will be set to the empty string. Any WSGI applications in the global application group will always be executed within the context of the first interpreter created by Python when it is initialised. Forcing a WSGI application to run within the first interpreter can be necessary when a third party C extension module for Python has used the simplified threading API for manipulation of the Python GIL and thus will not run correctly within any additional sub interpreters created by Python. **%{ENV:variable}** The application group name will be set to the value of the named environment variable. The environment variable is looked-up via the internal Apache notes and subprocess environment data structures and (if not found there) via getenv() from the Apache server process. In an Apache configuration file, environment variables accessible using the ``%{ENV}`` variable reference can be setup by using directives such as SetEnv and RewriteRule. For example, to group all WSGI scripts for a specific user when using mod_userdir within the same application group, the following could be used:: RewriteEngine On RewriteCond %{REQUEST_URI} ^/~([^/]+) RewriteRule . - [E=APPLICATION_GROUP:~%1] Options ExecCGI SetHandler wsgi-script WSGIApplicationGroup %{ENV:APPLICATION_GROUP} Defining Process Groups ----------------------- By default all WSGI applications will run in what is called 'embedded' mode. That is, the applications are run within Python sub interpreters hosted within the Apache child processes. Although this results in the best performance possible, there are a few down sides. First off, embedded mode is not recommended where you are not adept at tuning Apache. 
This is because the default MPM settings are never usually suitable for Python web applications, instead being biased towards static file serving and PHP applications. If you run embedded mode without tuning the MPM settings, you can experience problems with memory usage, due to default number of processes being too many, and can also experience load spikes, due to how Apache performs lazy creation of processes to meet demand. Secondly, embedded mode would not be suitable for shared web hosting environments as all applications run as the same user and through various means could interfere with each other. Running multiple Python applications within the same process, even if separated into distinct sub interpreters also presents other challenges and problems. These include problems with Python extension modules not being implemented correctly such that they work from a secondary sub interpreter, or when used from multiple sub interpreters at the same time. Where multiple applications, potentially owned by different users, need to be run, 'daemon' mode of mod_wsgi should instead be used. Using daemon mode, each application can be delegated to its own dedicated daemon process running just the WSGI application, with the Apache child processes merely acting as proxies for delivering the requests to the application. Any static files associated with the application would still be served up by the Apache child processes to ensure best performance possible. To denote that a daemon process should be created the WSGIDaemonProcess directive is used. The WSGIProcessGroup directive is then used to delegate specific WSGI applications to execute within that daemon process:: WSGIDaemonProcess www.site.com threads=15 maximum-requests=10000 Alias /favicon.ico /usr/local/wsgi/static/favicon.ico AliasMatch /([^/]*\.css) /usr/local/wsgi/static/styles/$1 Alias /media/ /usr/local/wsgi/static/media/ Order allow,deny Allow from all = 2.4> Require all granted WSGIScriptAlias / /usr/local/wsgi/scripts/myapp.wsgi WSGIProcessGroup www.site.com Order allow,deny Allow from all = 2.4> Require all granted Where Apache has been started as the ``root`` user, the daemon processes can optionally be run as a user different to that which the Apache child processes would normally be run as. The number of daemon processes making up the process group and whether they are single or multithreaded can also be controlled. A further option which should be considered is that which dictates the maximum number of requests that a daemon process should be allowed to accept before the daemon process is shutdown and restarted. This should be used where there are problems with increasing memory use due to problems with the application itself or a third party extension module. As a general recommendation it would probably be a good idea to use the maximum requests option when running large installations of packages such as Trac and MoinMoin. Any large web site based on frameworks such as Django, TurboGears and Pylons or applications which use a database backend may also benefit. If an application does not shutdown cleanly when the maximum number of requests has been reached, it will be killed off after the shutdown timeout has expired. If this occurs on a regular basis you should run with more than a single daemon process in the process group such that the other process can still accept requests while the first is being restarted. 
If the maximum requests option is not specified, then the daemon process will never expire and will only be restarted if Apache is restarted or the user explicitly signals it to restart. For further information about the options that can be supplied to the WSGIDaemonProcess directive see the more detailed documentation for :doc:`../configuration-directives/WSGIDaemonProcess`. A few of the options which can be supplied to the WSGIDaemonProcess directive worth highlighting are: **user=name | user=#uid** Defines the UNIX user _name_ or numeric user _uid_ of the user that the daemon processes should be run as. If this option is not supplied the daemon processes will be run as the same user that Apache would run child processes and as defined by the User directive. Note that this option is ignored if Apache wasn't started as the root user, in which case no matter what the settings, the daemon processes will be run as the user that Apache was started as. **group=name | group=#gid** Defines the UNIX group _name_ or numeric group _gid_ of the primary group that the daemon processes should be run as. If this option is not supplied the daemon processes will be run as the same group that Apache would run child processes and as defined by the Group directive. Note that this option is ignored if Apache wasn't started as the root user, in which case no matter what the settings, the daemon processes will be run as the group that Apache was started as. **processes=num** Defines the number of daemon processes that should be started in this process group. If not defined then only one process will be run in this process group. Note that if this option is defined as 'processes=1', then the WSGI environment attribute called 'wsgi.multiprocess' will be set to be True whereas not providing the option at all will result in the attribute being set to be False. This distinction is to allow for where some form of mapping mechanism might be used to distribute requests across multiple process groups and thus in effect it is still a multiprocess application. If you need to ensure that 'wsgi.multiprocess' is False so that interactive debuggers will work, simply do not specify the 'processes' option and allow the default single daemon process to be created in the process group. **threads=num** Defines the number of threads to be created to handle requests in each daemon process within the process group. If this option is not defined then the default will be to create 15 threads in each daemon process within the process group. **maximum-requests=nnn** Defines a limit on the number of requests a daemon process should process before it is shutdown and restarted. Setting this to a non zero value has the benefit of limiting the amount of memory that a process can consume by (accidental) memory leakage. If this option is not defined, or is defined to be 0, then the daemon process will be persistent and will continue to service requests until Apache itself is restarted or shutdown. Note that the name of the daemon process group must be unique for the whole server. That is, it is not possible to use the same daemon process group name in different virtual hosts. If the WSGIDaemonProcess directive is specified outside of all virtual host containers, any WSGI application can be delegated to be run within that daemon process group. 
If the WSGIDaemonProcess directive is specified within a virtual host container, only WSGI applications associated with virtual hosts with the same server name as that virtual host can be delegated to that set of daemon processes. When WSGIDaemonProcess is associated with a virtual host, the error log associated with that virtual host will be used for all Apache error log output from mod_wsgi rather than it appear in the main Apache error log. For example, if a server is hosting two virtual hosts and it is desired that the WSGI applications related to each virtual host run in distinct processes of their own and as a user which is the owner of that virtual host, the following could be used:: ServerName www.site1.com CustomLog logs/www.site1.com-access_log common ErrorLog logs/ww.site1.com-error_log WSGIDaemonProcess www.site1.com user=joe group=joe processes=2 threads=25 WSGIProcessGroup www.site1.com ... ServerName www.site2.com CustomLog logs/www.site2.com-access_log common ErrorLog logs/www.site2.com-error_log WSGIDaemonProcess www.site2.com user=bob group=bob processes=2 threads=25 WSGIProcessGroup www.site2.com ... When using the WSGIProcessGroup directive, the argument to the directive can be either one of two special expanding variables or the actual name of a group of daemon processes setup using the WSGIDaemonProcess directive. The meaning of the special variables are: **%{GLOBAL}** The process group name will be set to the empty string. Any WSGI applications in the global process group will always be executed within the context of the standard Apache child processes. Such WSGI applications will incur the least runtime overhead, however, they will share the same process space with other Apache modules such as PHP, as well as the process being used to serve up static file content. Running WSGI applications within the standard Apache child processes will also mean the application will run as the user that Apache would normally run as. **%{ENV:variable}** The process group name will be set to the value of the named environment variable. The environment variable is looked-up via the internal Apache notes and subprocess environment data structures and (if not found there) via getenv() from the Apache server process. The result must identify a named process group setup using the WSGIDaemonProcess directive. In an Apache configuration file, environment variables accessible using the `%{ENV}` variable reference can be setup by using directives such as SetEnv and RewriteRule. For example, to select which process group a specific WSGI application should execute within based on entries in a database file, the following could be used:: RewriteEngine On RewriteMap wsgiprocmap dbm:/etc/httpd/wsgiprocmap.dbm RewriteRule . - [E=PROCESS_GROUP:${wsgiprocmap:%{REQUEST_URI}}] WSGIProcessGroup %{ENV:PROCESS_GROUP} Note that the WSGIDaemonProcess directive and corresponding features are not available on Windows or when running Apache 1.3. mod_wsgi-5.0.0/docs/user-guides/configuration-issues.rst000066400000000000000000000057061452636074700234500ustar00rootroot00000000000000==================== Configuration Issues ==================== Many Linux distributions in particular do not structure an Apache installation in the default manner as dictated by the original Apache code distributed by the Apache Software Foundation. This fact, and differences between different operating systems and distributions means that the configuration for mod_wsgi may sometimes have to be tweaked. 
The purpose of this document is to capture all the known problems that can arise in respect of configuration. If you are having a problem which doesn't seem to be covered by this document, also make sure you see :doc:`../user-guides/installation-issues` and :doc:`../user-guides/application-issues`. Location Of UNIX Sockets ------------------------ When mod_wsgi is used in 'daemon' mode, UNIX sockets are used to communicate between the Apache child processes and the daemon processes which are to handle a request. These sockets and any related mutex lock files will be placed in the standard Apache runtime directory. This is the same directory that the Apache log files would normally be placed. For some Linux distributions, restrictive permissions are placed on the standard Apache runtime directory such that the directory is not readable to others. This can cause problems with mod_wsgi because the user that the Apache child processes run as will subsequently not have the required permissions to access the directory to be able to connect to the sockets. When this occurs, a '503 Service Temporarily Unavailable' error response would be received by the client. The Apache error log file would show messages of the form:: (13)Permission denied: mod_wsgi (pid=26962): Unable to connect to WSGI \ daemon process '' on '/etc/httpd/logs/wsgi.26957.0.1.sock' \ after multiple attempts. To resolve the problem, the WSGISocketPrefix directive should be defined to point at an alternate location. The value may be a location relative to the Apache root directory, or an absolute path. On systems which restrict access to the standard Apache runtime directory, they normally provide an alternate directory for placing sockets and lock files used by Apache modules. This directory is usually called 'run' and to make use of this directory the WSGISocketPrefix directive would be set as follows:: WSGISocketPrefix run/wsgi Although this may be present, do be aware that some Linux distributions, notably RedHat, also lock down the permissions of this directory as well so not readable to processes running as a non root user. In this situation you will be forced to use the operating system level '/var/run' directory rather than the HTTP specific directory:: WSGISocketPrefix /var/run/wsgi Note, do not put the sockets in the system temporary working directory. That is, do not go making the prefix '/tmp/wsgi'. The directory should be one that is only writable by 'root' user, or if not starting Apache as 'root', the user that Apache is started as. mod_wsgi-5.0.0/docs/user-guides/debugging-techniques.rst000066400000000000000000001421311452636074700233630ustar00rootroot00000000000000==================== Debugging Techniques ==================== Be it when initially setting up mod_wsgi for the first time, or later during development or use of your WSGI application, you are bound to get some sort of unexpected Python error. By default all you are usually going to see as evidence of this is a HTTP 500 "Internal Server Error" being displayed in the browser window and little else. The purpose of this document is to explain where to go look for more details on what caused the error, describe techniques one can use to have your WSGI application generate more useful debug information, as well as mechanisms that can be employed to interactively debug your application. 
Note that although this document is intended to deal with techniques which
can be used when using mod_wsgi, many of the techniques are also directly
transferable or adaptable to other web hosting mechanisms for WSGI
applications.

Apache Error Log Files
----------------------

When using mod_wsgi, unless you or the web framework you are using takes
specific action to catch exceptions and present the details in an alternate
manner, the only place that details of uncaught exceptions will be recorded
is in the Apache error log files. The Apache error log files are therefore
your prime source of information when things go wrong.

Do note though that log messages generated by mod_wsgi are logged with
various severity levels and which ones will be output to the Apache error
log files will depend on how Apache has been configured. The standard
configuration for Apache has the LogLevel directive set to 'warn'. With this
setting any important error messages will be output, but informational
messages generated by mod_wsgi which can assist in working out what it is
doing are not. Thus, if new to mod_wsgi or trying to debug a problem, it is
worthwhile setting the Apache configuration to use the 'info' log level
instead::

    LogLevel info

If your Apache web server is only providing services for one host, it is
likely that you will only have one error log file. If however the Apache web
server is configured for multiple virtual hosts, then it is possible that
there will be multiple error log files, one corresponding to the main server
host and an additional error log file for each virtual host. Such a virtual
host specific error log, if one is being used, would have been configured
through the placing of the Apache ErrorLog directive within the context of
the VirtualHost container.

Although your WSGI application may be hosted within a particular virtual
host and that virtual host has its own error log file, some error and
informational messages will still go to the main server host error log file.
Thus you may still need to consult both error log files when using virtual
hosts.

Messages of note that will end up in the main server host error log file
include notifications in regard to initialisation of Python and the creation
and destruction of Python sub interpreters, plus any errors which occur when
doing this. Messages of note that would end up in the virtual host error log
file, if it exists, include details of uncaught Python exceptions which
occur when the WSGI application script is being loaded, or when the WSGI
application callable object is being executed.

Messages that are logged by a WSGI application via the 'wsgi.errors' object
passed through to the application in the WSGI environment are also logged.
These will go to the virtual host error log file if it exists, or the main
error log file if the virtual host is not set up with its own error log
file.

Thus, if you want to add debugging messages to your WSGI application code,
you can use 'wsgi.errors' in conjunction with the 'print' function as shown
below::

    def application(environ, start_response):
        status = '200 OK'
        output = b'Hello World!'

        print("application debug #1", file=environ['wsgi.errors'])

        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)

        print("application debug #2", file=environ['wsgi.errors'])

        return [output]

.. note::

   If you are using Python 2, you will need to enable the `print` function
   at the beginning of the file::

       from __future__ import print_function

   Alternatively, always use `print` as a statement rather than a function::

       print >> environ['wsgi.errors'], "application debug #N"

If 'wsgi.errors' is not available to the code which needs to output log
messages, then it should explicitly direct output from 'print' to
'sys.stderr'::

    import sys

    def function():
        print("application debug #3", file=sys.stderr)
        ...

If ``sys.stderr`` or ``sys.stdout`` is used directly then these messages
will end up in the main server host error log file and not that for the
virtual host unless the WSGI application is running in a daemon process
specifically associated with a virtual host.

Do be aware though that writing to ``sys.stdout`` is by default restricted
in versions of mod_wsgi prior to 3.0 and will result in an exception
occurring of the form::

    IOError: sys.stdout access restricted by mod_wsgi

This is because portable WSGI applications should not write to
``sys.stdout`` or use 'print' without specifying an alternate file object
besides ``sys.stdout`` as the target. This restriction can be disabled for
the whole server using the WSGIRestrictStdout directive, or by mapping
``sys.stdout`` to ``sys.stderr`` at global scope within the WSGI application
script file::

    import sys
    sys.stdout = sys.stderr

In general, a WSGI application should always endeavour to only log messages
via the 'wsgi.errors' object that is passed through to a WSGI application in
the WSGI environment. This is because this is the only way of logging
messages for which there is some guarantee that they will end up in a log
file that you might have access to if using a shared server. An application
shouldn't however cache 'wsgi.errors' and try to use it outside of the
context of a request. If this is done an exception will be raised indicating
that the request has expired and the error log object is now invalid.

That messages output via ``sys.stderr`` and ``sys.stdout`` end up in the
Apache error logs at all is provided as a convenience but there is no
requirement in the WSGI specification that they are valid means of a WSGI
application logging messages.

Displaying Request Environment
------------------------------

When a WSGI application is invoked, the request headers are passed as CGI
variables in the WSGI request environment. The dictionary used for this also
holds information about the WSGI execution environment and mod_wsgi. This
includes mod_wsgi specific variables indicating the name of the process and
application groups within which the WSGI application is executing.

Knowing the values of the process and application group variables can be
important when needing to validate that your Apache configuration is doing
what you intended as far as ensuring your WSGI application is running in
daemon mode or otherwise.

A simple way of validating such details or getting access to any of the
other WSGI request environment variables is to substitute your existing WSGI
application with one which echoes back the details to your browser. Such a
task can be achieved with the following test application.
The application could be extended as necessary to display other information
as well, with process ID, user ID and group ID being shown as examples::

    import io
    import os

    def application(environ, start_response):
        headers = []
        headers.append(('Content-Type', 'text/plain'))
        write = start_response('200 OK', headers)

        input = environ['wsgi.input']
        output = io.StringIO()

        print("PID: %s" % os.getpid(), file=output)
        print("UID: %s" % os.getuid(), file=output)
        print("GID: %s" % os.getgid(), file=output)
        print(file=output)

        keys = sorted(environ.keys())

        for key in keys:
            print('%s: %s' % (key, repr(environ[key])), file=output)
        print(file=output)

        length = int(environ.get('CONTENT_LENGTH', '0') or '0')
        body = input.read(length)

        return [output.getvalue().encode('UTF-8'), body]

For the case of the process group as recorded by the 'mod_wsgi.process_group'
variable in the WSGI request environment, if the value is an empty string
then the WSGI application is running in embedded mode. For any other value
it will be running in daemon mode with the process group named by the
variable's value.

Note that by default WSGI applications run in embedded mode, which means
within the Apache server child processes which accept the original requests.
Daemon mode processes would only be used through appropriate use of the
WSGIDaemonProcess and WSGIProcessGroup directives to delegate the WSGI
application to a named daemon process group.

For the case of the application group as recorded by the
'mod_wsgi.application_group' variable in the WSGI request environment, if
the value is an empty string then the WSGI application is running in the
main Python interpreter. That is, the very first interpreter created when
Python was initialised. For any other value it indicates it is running in
the named Python sub interpreter.

Note that by default WSGI applications would always run in a sub interpreter
rather than the main interpreter. The name of this sub interpreter would be
automatically constructed from the name of the server or virtual host, the
URL mount point of the WSGI application and the number of the listener port
when it is other than ports 80 or 443.

To delegate a WSGI application to run in the main Python interpreter, the
WSGIApplicationGroup directive would need to have been used with the value
'%{GLOBAL}'. Although the value is '%{GLOBAL}', this translates to the empty
string seen for the value of 'mod_wsgi.application_group' within the WSGI
request environment. The WSGIApplicationGroup directive could also be used
to designate a specific named sub interpreter rather than that selected
automatically.

For newcomers this can all be a bit confusing, which is where the test
application comes in, as you can use it to validate that your WSGI
application is running where you intended it to run.

The set of WSGI request environment variables will also show the WSGI
variables indicating whether the process is multithreaded and whether the
process group is multiprocess or not. For a more complete explanation of
what that means see the documentation of
:doc:`../user-guides/processes-and-threading`.

Tracking Request and Response
-----------------------------

Although one can use the above test application to display the request
environment, it is replacing your original WSGI application. Rather than
replace your existing application you can use a WSGI middleware wrapper
application which logs the details to the Apache error log instead::

    # Original WSGI application.

    def application(environ, start_response):
        ...

    # Logging WSGI middleware.
import pprint class LoggingMiddleware: def __init__(self, application): self.__application = application def __call__(self, environ, start_response): errors = environ['wsgi.errors'] pprint.pprint(('REQUEST', environ), stream=errors) def _start_response(status, headers, *args): pprint.pprint(('RESPONSE', status, headers), stream=errors) return start_response(status, headers, *args) return self.__application(environ, _start_response) application = LoggingMiddleware(application) The output from the middleware would end up in the Apache error log for the virtual host, or if no virtual host specific error log file, in the main Apache error log file. For more complicated problems it may also be necessary to track both the request and response content as well. A more complicated middleware which can log these as well as header information to the file system is as follows:: # Original WSGI application. def application(environ, start_response): ... # Logging WSGI middleware. import threading import pprint import time import os class LoggingInstance: def __init__(self, start_response, oheaders, ocontent): self.__start_response = start_response self.__oheaders = oheaders self.__ocontent = ocontent def __call__(self, status, headers, *args): pprint.pprint((status, headers)+args, stream=self.__oheaders) self.__oheaders.close() self.__write = self.__start_response(status, headers, *args) return self.write def __iter__(self): return self def write(self, data): self.__ocontent.write(data) self.__ocontent.flush() return self.__write(data) def __next__(self): data = next(self.__iterable) self.__ocontent.write(data) self.__ocontent.flush() return data def close(self): if hasattr(self.__iterable, 'close'): self.__iterable.close() self.__ocontent.close() def link(self, iterable): self.__iterable = iter(iterable) class LoggingMiddleware: def __init__(self, application, savedir): self.__application = application self.__savedir = savedir self.__lock = threading.Lock() self.__pid = os.getpid() self.__count = 0 def __call__(self, environ, start_response): self.__lock.acquire() self.__count += 1 count = self.__count self.__lock.release() key = "%s-%s-%s" % (time.time(), self.__pid, count) iheaders = os.path.join(self.__savedir, key + ".iheaders") iheaders_fp = open(iheaders, 'w') icontent = os.path.join(self.__savedir, key + ".icontent") icontent_fp = open(icontent, 'w+b') oheaders = os.path.join(self.__savedir, key + ".oheaders") oheaders_fp = open(oheaders, 'w') ocontent = os.path.join(self.__savedir, key + ".ocontent") ocontent_fp = open(ocontent, 'w+b') errors = environ['wsgi.errors'] pprint.pprint(environ, stream=iheaders_fp) iheaders_fp.close() length = int(environ.get('CONTENT_LENGTH', '0')) input = environ['wsgi.input'] while length != 0: data = input.read(min(4096, length)) if data: icontent_fp.write(data) length -= len(data) else: length = 0 icontent_fp.flush() icontent_fp.seek(0, os.SEEK_SET) environ['wsgi.input'] = icontent_fp iterable = LoggingInstance(start_response, oheaders_fp, ocontent_fp) iterable.link(self.__application(environ, iterable)) return iterable application = LoggingMiddleware(application, '/tmp/wsgi') For this middleware, the second argument to the constructor should be a preexisting directory. For each request four files will be saved. These correspond to input headers, input content, response status and headers, and request content. 
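To make the captured data easier to review, the four files for each request
can be paired back up by their common key. The helper below is only a
sketch, with the 'list_captures' name being purely illustrative, and it
assumes the same '/tmp/wsgi' save directory used in the example above::

    import glob
    import os.path

    SAVEDIR = '/tmp/wsgi'

    def list_captures(savedir=SAVEDIR):
        # Group the saved files by the "time-pid-count" key used by
        # LoggingMiddleware when naming each capture.
        captures = {}
        for path in glob.glob(os.path.join(savedir, '*.*')):
            name = os.path.basename(path)
            key, extension = name.rsplit('.', 1)
            captures.setdefault(key, {})[extension] = path
        return captures

    if __name__ == '__main__':
        for key, files in sorted(list_captures().items()):
            print(key)
            for extension in ('iheaders', 'icontent', 'oheaders', 'ocontent'):
                print('  %s: %s' % (extension, files.get(extension)))

Being plain Python, this can be run from the command line, separate from
Apache, once some requests have been captured.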
Poorly Performing Code ---------------------- The WSGI specification allows any iterable object to be returned as the response, so long as the iterable yields byte-strings (``bytes``, or ``str`` on Python 2). That this is the case means that one can too easily return an object which satisfies this requirement but has some sort of performance related issue. The worst case of this is where instead of returning a list containing byte-strings, a single byte-string value is returned. The problem is that when a byte-string is iterated over, a single byte is yielded each time. In other words, a single byte is written back to the client on each loop, with a flush occurring in between to ensure that the byte has actually been written and isn't just being buffered. Although for small byte-strings a performance impact may not be noticed, if returning more data the effect on request throughput could be quite significant. Another case which can cause problems is to return a file like object. For iteration over a file like object, typically what can occur is that a single line within the file is returned each time. If the file is a line oriented text file where each line is a of a reasonable length, this may be okay, but if the file is a binary file there may not actually be line breaks within the file. For the case where file contains many short lines, throughput would be affected much like in the case where a byte-string is returned. For the case where the file is just binary data, the result can be that the complete file may be read in on the first loop. If the file is large, this could cause a large transient spike in memory usage. Once that memory is allocated, it will then usually be retained by the process, albeit that it may be reused by the process at a later point. Because of the performance impacts in terms of throughput and memory usage, both these cases should be avoided. For the case of returning a byte-string, it should be returned with a single element list. For the case of a file like object, the 'wsgi.file_wrapper' extension should be used, or a wrapper which suitably breaks the response into chunks. In order to identify where code may be inadvertently returning such iterable types, the following code can be used:: import io import socket import sys BAD_ITERABLES = [ bytes, socket.socket, io.IOBase, ] if sys.version_info < (3, 0): # Python 2 import types import cStringIO import StringIO BAD_ITERABLES.extend([ types.FileType, cStringIO.InputType, StringIO.StringIO, ]) class ValidatingMiddleware: def __init__(self, application): self.__application = application def __call__(self, environ, start_response): errors = environ['wsgi.errors'] result = self.__application(environ, start_response) value = type(result) if value in BAD_ITERABLES: print('BAD ITERABLE RETURNED: ', file=errors, end='') print('URL=%s ' % environ['REQUEST_URI'], file=errors, end='') print('TYPE=%s' % value, file=errors) return result def application(environ, start_response): ... application = ValidatingMiddleware(application) Error Catching Middleware ------------------------- Because mod_wsgi only logs details of uncaught exceptions to the Apache error log and returns a generic HTTP 500 "Internal Server Error" response, if you want the details of any exception to be displayed in the error page and be visible from the browser, you will need to use a WSGI error catching middleware component. One example of WSGI error catching middleware is the ErrorMiddleware class from Paste. 
* https://pythonpaste.readthedocs.io/en/latest/ This class can be configured not only to catch exceptions and present the details to the browser in an error page, it can also be configured to send the details of any errors in email to a designated recipient, or log the details to an alternate log file. Being able to have error details sent by email would be useful in a production environment or where your application is running on a web hosting environment and the Apache error logs would not necessarily be closely monitored on a day to day basis. Enabling of that particular feature though should possibly only be done when you have some confidence in the application else you might end up getting inundated with emails. To use the error catching middleware from Paste you simply need to wrap your existing application with it such that it then becomes the top level application entry point:: def application(environ, start_response): status = '200 OK' output = b'Hello World!\n' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] from paste.exceptions.errormiddleware import ErrorMiddleware application = ErrorMiddleware(application, debug=True) In addition to displaying information about the Python exception that has occurred and the stack traceback, this middleware component will also output information about the WSGI environment such that you can see what was being passed to the WSGI application. This can be useful if the cause of any problem was unexpected values passed in the headers of the HTTP request. Note that error catching middleware is of absolutely no use for trying to capture and display in the browser any errors that occur at global scope within the WSGI application script when it is being imported. Details of any such errors occurring at this point will only be captured in the Apache error log files. As much as possible you should avoid performing complicated tasks when the WSGI application script file is being imported, instead you should only trigger such actions the first time a request is received. By doing this you will be able to capture errors in such initialisation code with the error catching middleware. Also note that the debug mode whereby details are displayed in the browser should only be used during development and not in a production system. This is because details which are displayed may be of use to anyone who may wish to compromise your site. Python Interactive Debugger --------------------------- Python debuggers such as implemented by the 'pdb' module can sometimes be useful in debugging Python applications, especially where there is a need to single step through code and analyse application state at each point. Use of such debuggers in web applications can be a bit more tricky than normal applications though and especially so with mod_wsgi. The problem with mod_wsgi is that the Apache web server can create multiple child processes to respond to requests. Partly because of this, but also just to prevent problems in general, Apache closes off standard input at startup. Thus there is no actual way to interact with the Python debugger module if it were used. To get around this requires having complete control of the Apache web server that you are using to host your WSGI application. 
In particular, it will be necessary to shutdown the web server and then startup the 'httpd' process explicitly in single process debug mode, avoiding the 'apachectl' management application altogether:: $ apachectl stop $ httpd -X If Apache is normally started as the 'root' user, this also will need to be run as the 'root' user otherwise the Apache web server will not have the required permissions to write to its log directories etc. The result of starting the 'httpd' process in this way will be that the Apache web server will run everything in one process rather than using multiple processes. Further, it will not close off standard input thus allowing the Python debugger to be used. Do note though that one cannot be using the ability of mod_wsgi to run your application in a daemon process when doing this. The WSGI application must be running within the main Apache process. To trigger the Python debugger for any call within your code, the following customised wrapper for the 'Pdb' class should be used:: class Debugger: def __init__(self, object): self.__object = object def __call__(self, *args, **kwargs): import pdb, sys debugger = pdb.Pdb() debugger.use_rawinput = 0 debugger.reset() sys.settrace(debugger.trace_dispatch) try: return self.__object(*args, **kwargs) finally: debugger.quitting = 1 sys.settrace(None) This might for example be used to wrap the actual WSGI application callable object:: def application(environ, start_response): status = '200 OK' output = b'Hello World!\n' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] application = Debugger(application) When a request is now received, the Python debugger will be triggered and you can interactively debug your application from the window you ran the 'httpd' process. For example:: > /usr/local/wsgi/scripts/hello.py(21)application() -> status = '200 OK' (Pdb) list 16 finally: 17 debugger.quitting = 1 18 sys.settrace(None) 19 20 def application(environ, start_response): 21 -> status = '200 OK' 22 output = b'Hello World!\n' 23 24 response_headers = [('Content-type', 'text/plain'), 25 ('Content-Length', str(len(output)))] 26 start_response(status, response_headers) (Pdb) print start_response cont When wishing to allow the request to complete, issue the 'cont' command. If wishing to cause the request to abort, issue the 'quit' command. This will result in a 'BdbQuit' exception being raised and would result in a HTTP 500 "Internal Server Error" response being returned to the client. To kill off the whole 'httpd' process, after having issued 'cont' or 'quit' to exit the debugger, interrupt the process using 'CTRL-C'. To see what commands the Python debugger accepts, issue the 'help' command and also consult the documentation for the 'pdb' module on the Python web site. Note that the Python debugger expects to be able to write to ``sys.stdout`` to display information to the terminal. Thus if using using a Python web framework which replaces ``sys.stdout`` such as web.py, you will not be able to use the Python debugger. Browser Based Debugger ---------------------- In order to use the Python debugger modules you need to have direct access to the host and the Apache web server that is running your WSGI application. If your only access to the system is via your web browser this makes the use of the full Python debugger impractical. 
An alternative to the Python debugger modules which is available is an extension of the WSGI error catching middleware previously described. This is the EvalException class from Paste. It embodies the error catching attributes of the ErrorMiddleware class, but also allows some measure of interactive debugging and introspection through the web browser. As with any WSGI middleware component, to use the class entails creating a wrapper around the application you wish to debug:: def application(environ, start_response): status = '200 OK' output = b'Hello World!\n' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] from paste.evalexception.middleware import EvalException application = EvalException(application) Like ErrorMiddleware when an unexpected exception occurs a web page is presented which shows the location of the error along with the contents of the WSGI application environment. Where EvalException is different however is that it is possible to inspect the local variables residing within each stack frame down to where the error occurred. Further, it is possible to enter Python code which can be evaluated within the context of the selected stack frame in order to access data or call functions or methods of objects. In order for this to all work requires that subsequent requests back to the WSGI application always end up with the same process where the error originally occurred. With mod_wsgi this does however present a bit of a problem as Apache can create and use multiple child processes to handle requests. Because of this requirement, if you want to be able to use this browser based interactive debugger, if running your application in embedded mode of mod_wsgi, you will need to configure Apache such that it only starts up one child process to handle requests and that it never creates any additional processes. The Apache configuration directives required to achieve this are as follows:: StartServers 1 ServerLimit 1 The directives must be placed at global scope within the main Apache configuration files and will affect the whole Apache web server. If you are using the worker MPM on a UNIX system, restricting Apache to just a single process may not be an issue, at least during development. If however you are using the prefork MPM on a UNIX system, you may see issues if you are using an AJAX intensive page that relies on being able to execute parallel requests, as only one request at a time will be able to be handled by the Apache web server. If using Apache 2.X on a UNIX system, a better approach is to use daemon mode of mod_wsgi and delegate your application to run in a single daemon process. This process may be single or multithreaded as per any threading requirements of your application. Which ever configuration is used, if the browser based interactive debugger is used it should only be used on a development system and should never be deployed on a production system or in a web hosting environment. This is because the debugger will allow one to execute arbitrary Python code within the context of your application from a remote client. Debugging Crashes With GDB -------------------------- In cases where Apache itself crashes for no apparent reason, the above techniques are not always particularly useful. This is especially the case where the crash occurs in non Python code outside of your WSGI application. 
The most common cause of Apache crashing, besides any still latent bugs that may exist in mod_wsgi, of which hopefully there aren't any, are shared library version mismatches. Another major cause of crashes is third party C extension modules for Python which are not compatible with being used in a Python sub interpreter which isn't the first interpreter created when Python is initialised, or modules which are not compatible with Python sub interpreters being destroyed and the module then being used in a new Python sub interpreter. Examples of where shared library version mismatches are known to occur are between the version of the 'expat' library used by Apache and that embedded within the Python 'pyexpat' module. Another is between the version of the MySQL client libraries used by PHP and the Python MySQL module. Both these can be a cause of crashes where the different components are compiled and linked against different versions of the shared library for the packages in question. It is vitally important that all packages making use of a shared library were compiled against and use the same version of a shared library. Another problematic package is Subversion. In this case there can be conflicts between the version of Subversion libraries used by mod_dav_svn and the Python Subversion bindings. Certain versions of the Python Subversion modules also cause problems because they appear to be incompatible with use in a Python sub interpreter which isn't the first interpreter created when Python is initialised. In this latter issue, the sub interpreter problems can often be solved by forcing the WSGI application using the Python Subversion modules to run in the '%{GLOBAL}' application group. This solution often also resolves issues with SWIG generated bindings, especially where the ``-thread`` option was supplied to 'swig' when the bindings were generated. Whatever the reason, in some cases the only way to determine why Apache or Python is crashing is to use a C code debugger such as 'gdb'. Now although it is possible to attach 'gdb' to a running process, the preferred method for using 'gdb' in conjunction with Apache is to run Apache in single process debug mode from within 'gdb'. To do this it is necessary to first shutdown Apache. The 'gdb' debugger can then be started against the 'httpd' executable and then the process started up from inside of 'gdb' with the `-X` flag to select single-process debug mode:: $ /usr/local/apache/bin/apachectl stop $ sudo gdb /usr/local/apache/bin/httpd GNU gdb 6.1-20040303 (Apple version gdb-384) (Mon Mar 21 00:05:26 GMT 2005) Copyright 2004 Free Software Foundation, Inc. GDB is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Type "show copying" to see the conditions. There is absolutely no warranty for GDB. Type "show warranty" for details. This GDB was configured as "powerpc-apple-darwin"...Reading symbols for shared libraries ........ done (gdb) run -X Starting program: /usr/local/apache/bin/httpd -X Reading symbols for shared libraries .+++ done Reading symbols for shared libraries ..................... done If Apache is normally started as the 'root' user, this also will need to be run as the 'root' user otherwise the Apache web server will not have the required permissions to write to its log directories etc. 
If Apache was crashing on startup, you should immediately encounter the error, otherwise use your web browser to access the URL which is causing the crash to occur. You can then commence trying to debug why the crash is occurring. Note that you should ensure that you have not assigned your WSGI application to run in a mod_wsgi daemon process using the WSGIDaemonProcess and WSGIProcessGroup directives. This is because the above procedure will only catch crashes which occur when the application is running in embedded mode. If it turns out that the application only crashes when run in mod_wsgi daemon mode, an alternate method of using 'gdb' will be required. In this circumstance you should run Apache as normal, but ensure that you only create one mod_wsgi daemon process and have it use only a single thread:: WSGIDaemonProcess debug threads=1 WSGIProcessGroup debug If not running the daemon process as a distinct user where you can tell which process it is, then you will also need to ensure that Apache !LogLevel directive has been set to 'info'. This is to ensure that information about daemon processes created by mod_wsgi are logged to the Apache error log. This is necessary, as you will need to consult the Apache error logs to determine the process ID of the daemon process that has been created for that daemon process group:: mod_wsgi (pid=666): Starting process 'debug' with threads=1. Knowing the process ID, you should then run 'gdb', telling it to attach directly to the daemon process:: $ sudo gdb /usr/local/apache/bin/httpd 666 GNU gdb 6.1-20040303 (Apple version gdb-384) (Mon Mar 21 00:05:26 GMT 2005) Copyright 2004 Free Software Foundation, Inc. GDB is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Type "show copying" to see the conditions. There is absolutely no warranty for GDB. Type "show warranty" for details. This GDB was configured as "powerpc-apple-darwin"...Reading symbols for shared libraries ........ done /Users/grahamd/666: No such file or directory. Attaching to program: `/usr/local/apache/bin/httpd', process 666. Reading symbols for shared libraries .+++..................... done 0x900c7060 in sigwait () (gdb) cont Continuing. Once 'gdb' has been started and attached to the process, then initiate the request with the URL that causes the application to crash. Attaching to the running daemon process can also be useful where a single request or the whole process is appearing to hang. In this case one can force a stack trace to be output for all running threads to try and determine what code is getting stuck. The appropriate gdb command in this instance is 'thread apply all bt':: sudo gdb /usr/local/apache-2.2/bin/httpd 666 GNU gdb 6.3.50-20050815 (Apple version gdb-477) (Sun Apr 30 20:06:22 GMT 2006) Copyright 2004 Free Software Foundation, Inc. GDB is free software, covered by the GNU General Public License, and you are welcome to change it and/or distribute copies of it under certain conditions. Type "show copying" to see the conditions. There is absolutely no warranty for GDB. Type "show warranty" for details. This GDB was configured as "powerpc-apple-darwin"...Reading symbols for shared libraries ....... done /Users/grahamd/666: No such file or directory. Attaching to program: `/usr/local/apache/bin/httpd', process 666. Reading symbols for shared libraries .+++..................... 
done 0x900c7060 in sigwait () (gdb) thread apply all bt Thread 4 (process 666 thread 0xd03): #0 0x9001f7ac in select () #1 0x004189b4 in apr_pollset_poll (pollset=0x1894650, timeout=-1146117585187099488, num=0xf0182d98, descriptors=0xf0182d9c) at poll/unix/select.c:363 #2 0x002a57f0 in wsgi_daemon_thread (thd=0x1889660, data=0x18895e8) at mod_wsgi.c:6980 #3 0x9002bc28 in _pthread_body () Thread 3 (process 666 thread 0xc03): #0 0x9001f7ac in select () #1 0x0041d224 in apr_sleep (t=1000000) at time/unix/time.c:246 #2 0x002a2b10 in wsgi_deadlock_thread (thd=0x0, data=0x2aee68) at mod_wsgi.c:7119 #3 0x9002bc28 in _pthread_body () Thread 2 (process 666 thread 0xb03): #0 0x9001f7ac in select () #1 0x0041d224 in apr_sleep (t=299970002) at time/unix/time.c:246 #2 0x002a2dec in wsgi_monitor_thread (thd=0x0, data=0x18890e8) at mod_wsgi.c:7197 #3 0x9002bc28 in _pthread_body () Thread 1 (process 666 thread 0x203): #0 0x900c7060 in sigwait () #1 0x0041ba9c in apr_signal_thread (signal_handler=0x2a29a0 ) at threadproc/unix/signals.c:383 #2 0x002a3728 in wsgi_start_process (p=0x1806418, daemon=0x18890e8) at mod_wsgi.c:7311 #3 0x002a6a4c in wsgi_hook_init (pconf=0x1806418, ptemp=0x0, plog=0xc8, s=0x18be8d4) at mod_wsgi.c:7716 #4 0x0000a5b0 in ap_run_post_config (pconf=0x1806418, plog=0x1844418, ptemp=0x180e418, s=0x180da78) at config.c:91 #5 0x000033d4 in main (argc=3, argv=0xbffffa8c) at main.c:706 It is suggested when trying to debug such issues that the daemon process be made to run with only a single thread. This will reduce how many stack traces one needs to analyse. If you are running with multiple processes within the daemon process group and all requests are hanging, you will need to get a snapshot of what is happening in all processes in the daemon process group. Because doing this by hand will be tedious, it is better to automate it. To automate capturing the stack traces, first create a file called 'gdb.cmds' which contains the following:: set pagination 0 thread apply all bt detach quit This can then be used in conjunction with 'gdb' to avoid needing to enter the commands manually. For example:: sudo gdb /usr/local/apache-2.2/bin/httpd -x gdb.cmds -p 666 To be able to automate this further and apply it to all processes in a daemon process group, then first off ensure that daemon processes are named in 'ps' output by using the 'display-name' option to WSGIDaemonProcess directive. For example, to apply default naming strategy as implemented by mod_wsgi, use:: WSGIDaemonProcess xxx display-name=%{GLOBAL} In the output of a BSD derived 'ps' command, this will now show the process as being named '(wsgi:xxx)':: $ ps -cxo command,pid | grep wsgi (wsgi:xxx) 666 Note that the name may be truncated as the resultant name can be no longer than what was the length of the original executable path for Apache. You may therefore like to name it explicitly:: WSGIDaemonProcess xxx display-name=(wsgi:xxx) Having named the processes in the daemon process group, we can now parse the output of 'ps' to identify the process and apply the 'gdb' command script to each:: for pid in `ps -cxo command,pid | awk '{ if ($0 ~ /wsgi:xxx/ && $1 !~ /grep/) print $NF }'`; do sudo gdb -x gdb.cmds -p $pid; done The actual name given to the daemon process group using the 'display-name' option should be replaced in this command line. That is, change 'wsgi:xxx' appropriately. 
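If the shell pipeline proves awkward to adapt, the same automation can be
expressed as a short Python script. The following is only a sketch, with the
helper names being purely illustrative; it assumes the 'gdb.cmds' file shown
above, a BSD derived 'ps' command, and that the daemon processes were named
'(wsgi:xxx)' using the 'display-name' option::

    import subprocess

    GDB_CMDS = 'gdb.cmds'
    PATTERN = 'wsgi:xxx'

    def daemon_pids(pattern=PATTERN):
        # Parse 'ps' output looking for the daemon processes named via the
        # 'display-name' option of the WSGIDaemonProcess directive.
        output = subprocess.check_output(['ps', '-cxo', 'command,pid'])
        pids = []
        for line in output.decode('ascii', 'replace').splitlines():
            if pattern in line:
                pids.append(int(line.split()[-1]))
        return pids

    def dump_stack_traces(pattern=PATTERN):
        for pid in daemon_pids(pattern):
            # Equivalent to running: gdb -x gdb.cmds -p <pid>
            subprocess.call(['gdb', '-x', GDB_CMDS, '-p', str(pid)])

    if __name__ == '__main__':
        dump_stack_traces()

As with the shell version, this would normally need to be run as the 'root'
user so that 'gdb' has permission to attach to the processes.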
If you are having problems with process in daemon process groups hanging, you might consider implementing a monitoring system which automatically detects somehow when the processes are no longer responding to requests and automatically trigger this dump of the stack traces before restarting the daemon process group or Apache. Extracting Python Stack Traces ------------------------------ Using gdb to get stack traces as described above only gives you information about what is happening at the C code level. This will not tell where in the actual Python code execution was at. Your only clue is going to be where a call out was being made to some distinct C function in a C extension module for Python. One can get stack traces for Python code by using:: def _stacktraces(): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) for line in code: print >> sys.stderr, line The caveat here obviously is that the process has to still be running. There is also the issue of how you trigger that function to dump stack traces for executing Python threads. If the problem you have is that some request handler threads are stuck, either blocked, or stuck in an infinite loop, and you want to know what they are doing, then so long as there are still some handler threads left and the application is still responding to requests, then you could trigger it from a request handler triggered by making a request against a specific URL. This though depends on you only running your application within a single process because as soon as you have multiple processes you have no guarantee that a request will go to the process you want to debug. A better method therefore is to have a perpetually running background thread which monitors for a specific file in the file system. When that file is created or the modification time changes, then the background thread would dump the stack traces for the process. Sample code which takes this approach is included below. This code could be placed temporarily at the end of your WSGI script file if you know you are going to need it because of a recurring problem:: from __future__ import print_function import os import sys import time import signal import threading import atexit import traceback try: from Queue import Queue # Python 2 except ImportError: from queue import Queue # Python 3 FILE = '/tmp/dump-stack-traces.txt' _interval = 1.0 _running = False _queue = Queue() _lock = threading.Lock() def _stacktraces(): code = [] for threadId, stack in sys._current_frames().items(): code.append("\n# ProcessId: %s" % os.getpid()) code.append("# ThreadID: %s" % threadId) for filename, lineno, name, line in traceback.extract_stack(stack): code.append('File: "%s", line %d, in %s' % (filename, lineno, name)) if line: code.append(" %s" % (line.strip())) for line in code: print(line, file=sys.stderr) try: mtime = os.path.getmtime(FILE) except: mtime = None def _monitor(): while True: global mtime try: current = os.path.getmtime(FILE) except: current = None if current != mtime: mtime = current _stacktraces() # Go to sleep for specified interval. 
try: return _queue.get(timeout=_interval) except: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except: pass _thread.join() atexit.register(_exiting) def _start(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting stack trace monitor.' % prefix, file=sys.stderr) _running = True _thread.start() _lock.release() _start() Once your WSGI script file has been loaded, then touching the file '/tmp/dump-stack-traces.txt' will cause stack traces for active Python threads to be output to the Apache error log. Note that the sample code doesn't deal with possibility that with multiple processes for same application, that all processes may attempt to dump information at the same time. As such, you may get interleaving of output from multiple processes in Apache error logs at the same time. What you may want to do is modify this code to dump out to some special directory, distinct files containing the trace where the names of the file include the process ID and a date/time. That way each will be separate. An example of what one might expect to see from the above code is as follows:: # ProcessId: 666 # ThreadID: 4352905216 File: "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 497, in __bootstrap self.__bootstrap_inner() File: "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 522, in __bootstrap_inner self.run() File: "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/threading.py", line 477, in run self.__target(*self.__args, **self.__kwargs) File: "/Library/WebServer/Sites/django-1/htdocs/project.wsgi", line 72, in _monitor _stacktraces() File: "/Library/WebServer/Sites/django-1/htdocs/project.wsgi", line 47, in _stacktraces for filename, lineno, name, line in traceback.extract_stack(stack): # ThreadID: 4322832384 File: "/Library/WebServer/Sites/django-1/htdocs/project.wsgi", line 21, in application return _application(environ, start_response) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/core/handlers/wsgi.py", line 245, in __call__ response = middleware_method(request, response) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/contrib/sessions/middleware.py", line 36, in process_response request.session.save() File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/contrib/sessions/backends/db.py", line 63, in save obj.save(force_insert=must_create, using=using) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/base.py", line 434, in save self.save_base(using=using, force_insert=force_insert, force_update=force_update) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/base.py", line 527, in save_base result = manager._insert(values, return_id=update_pk, using=using) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/manager.py", line 195, in _insert return insert_query(self.model, values, **kwargs) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/query.py", line 1479, in insert_query return query.get_compiler(using=using).execute_sql(return_id) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/sql/compiler.py", line 783, in execute_sql cursor = 
super(SQLInsertCompiler, self).execute_sql(None) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/django/db/models/sql/compiler.py", line 727, in execute_sql cursor.execute(sql, params) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/debug_toolbar/panels/sql.py", line 95, in execute stacktrace = tidy_stacktrace(traceback.extract_stack()) File: "/Library/WebServer/Sites/django-1/lib/python2.6/site-packages/debug_toolbar/panels/sql.py", line 40, in tidy_stacktrace s_path = os.path.realpath(s[0]) File: "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/posixpath.py", line 355, in realpath if islink(component): File: "/System/Library/Frameworks/Python.framework/Versions/2.6/lib/python2.6/posixpath.py", line 132, in islink st = os.lstat(path) Note that one of the displayed threads will be that for the thread which is dumping the stack traces. That stack trace can obviously be ignored. One could extend the above recipe in more elaborate ways by using a WSGI middleware that capture details of each request from the WSGI environment and also dumping out from that the URL for the request being handled by any threads. This may assist in working out whether problems are related to a specific URL. mod_wsgi-5.0.0/docs/user-guides/file-wrapper-extension.rst000066400000000000000000000245541452636074700237010ustar00rootroot00000000000000====================== File Wrapper Extension ====================== The WSGI specification supports an optional feature that can be implemented by WSGI adapters for platform specific file handling. * http://www.python.org/dev/peps/pep-0333/#optional-platform-specific-file-handling What this allows is for a WSGI application to return a special object type which wraps a Python file like object. If that file like object statisfies certain conditions as dictated by a specific platform, then the WSGI adapter is allowed to return the content of that file in an optimised manner. The intent of this is to provide better performance for serving up static file content than a pure Python WSGI application may itself be able to achieve. Do note however that for the best performance, static files should always be served by a web server. In the case of mod_wsgi this means by Apache itself rather than mod_wsgi or the WSGI application. Using the web server may not always be possible however, such as for files generated on demand. Example Of Wrapper Usage ------------------------ A WSGI adapter implementing this extension needs to supply a special callable object under the key 'wsgi.file_wrapper' in the 'environ' dictionary passed to the WSGI application. What this callable does will be specific to a WSGI adapter, but it must be a callable that accepts one required positional parameter, and one optional positional parameter. The first parameter is the file like object to be sent, and the second parameter is an optional block size. If the block size is not supplied then the WSGI adapter would choose a value which would be most appropriate for the specific hosting mechanism. Whatever the WSGI adapter does, the result of the callable must be an iterable object which can be used directly as the response from the WSGI application or for passing into any WSGI middleware. 
Provided the response content isn't consumed by any WSGI middleware and the iterable object gets passed through the WSGI adapter, the WSGI adapter should recognise the special iterable object and trigger any special handling to return the response in a more efficient way. Because the support of this platform specific file handling is optional for any specific WSGI adapter, any user code should be coded so as to be able to cope with it not existing. Using the snippet as described in the WSGI specification as guide, the WSGI application would be written as follows:: def application(environ, start_response): status = '200 OK' response_headers = [('Content-type', 'text/plain')] start_response(status, response_headers) filelike = file('usr/share/dict/words', 'rb') block_size = 4096 if 'wsgi.file_wrapper' in environ: return environ['wsgi.file_wrapper'](filelike, block_size) else: return iter(lambda: filelike.read(block_size), '') Note that the file must always be opened in binary mode. If this isn't done then on platforms which do CR/LF translation automatically then the original content will not be returned but the translated form. As well as it not being the original content this can cause problems with calculated content lengths if the 'Content-Length' response header is returned by the WSGI application and it has been generated by looking at the actual file size rather than the translated content. Addition Of Content Length -------------------------- The WSGI specification does not say anything specific about whether a WSGI adapter should generate a 'Content-Length' response header when the 'wsgi.file_wrapper' extension is used and the WSGI application does not return one itself. For mod_wsgi at least, if the WSGI application doesn't provide a 'Content-Length' response header it will calculate the response content length automatically as being from the current file position to the end of the file. A 'Content-Length' header will then be added to the response for that value. As far as is known, only mod_wsgi automatically supplies a 'Content-Length' response header in this way. If consistent behaviour is required on all platforms, the WSGI application should always calculate the length and add the header itself. Existing Content Length ----------------------- Where a 'Content-Length' is specified by the WSGI application, mod_wsgi will honour that content length. That is, mod_wsgi will only return as many bytes of the file as specified by the 'Content-Length' header. This is not a requirement of the WSGI specification, but then this is one area of the WSGI specification which is arguably broken. This manifests in the WSGI specification where it says: """transmission should begin at the current position within the "file" at the time that transmission begins, and continue until the end is reached""" If this interpretation is used, where a WSGI application supplies a 'Content-Length' header and the number of bytes listed is less than the number of bytes remaining in the file from the current position, then more bytes than specified by the 'Content-Length' header would be returned. To do this would technically be in violation of HTTP specifications which should dictate that the number of bytes returned be the same as that specified by the 'Content-Length' response header if supplied. 
Not only is this statement in the WSGI specification arguably wrong, the example snippet of code which shows how to implement a fallback where the 'wsgi.file_wrapper' is not present, ie.:: if 'wsgi.file_wrapper' in environ: return environ['wsgi.file_wrapper'](filelike, block_size) else: return iter(lambda: filelike.read(block_size), '') is also wrong. This is because it doesn't restrict the amount of bytes returned to that specified by 'Content-Length'. Although mod_wsgi for normal iterable content would also discard any bytes in excess of the specified 'Content-Length', many other WSGI adapters are not known to do this and would just pass back all content regardless. The result of returning excessive content above the specified 'Content-Length' would be the failure of subsequent connections were the connection using keep alive and was pipe lining requests. This problem is also compounded by the WSGI specification not placing any requirement on WSGI middleware to respect the 'Content-Length' response header when processing response content. Thus WSGI middleware could also in general generate incorrect response content by virtue of not honouring the 'Content-Length' response header. Overall, although mod_wsgi does what is the logical and right thing to do, if you need to write code which is portable to other WSGI hosting mechanisms, you should never produce a 'Content-Length' response header which lists a number of bytes different to that which would be yielded from an iterable object such as a file like object. Thus it would be impossible to use any platform specific file handling features to return a range of bytes from a file. Restrictions On Optimisations ----------------------------- Although mod_wsgi always supplies the 'wsgi.file_wrapper' callable object as part of the WSGI 'environ' dictionary, optimised methods of returning the file contents as the response are not always used. A general restriction is that the file like object must supply both a 'fileno()' and 'tell()' method. This is necessary in order to get access to the underlying file descriptor and to determine the current position within the file. The file descriptor is needed so as to be able to use the 'sendfile()' function to return file contents in a more optimal manner. The 'tell()' method is needed to be able to calculate response 'Content-Length' and to validate that where the WSGI application supplies its own 'Content-Length' header that there is sufficient bytes in the file. Because the 'sendfile()' function is used by Apache to return file contents in a more optimal manner and because on Windows a Python file object only provides a Windows file handle and not a file descriptor, no optimisations are available on the Windows platform. The optimisations are also not able to be used if using Apache 1.3. This is because Apache doesn't provide access to a mechanism for optimised sending of file contents to a content handler under Apache 1.3. Finally, optimisations are not used where the WSGI application is running in daemon mode. This is currently disabled because some UNIX platforms do not appear to support use of the 'sendfile()' function over UNIX sockets and only support INET sockets. This situation may possibly have changed with recent versions of Linux at least but this has yet to be investigated properly. Whether or not optimisations are supported, the mod_wsgi 'wsgi.file_wrapper' extension generally still performs better than if a pure Python iterable object was used to yield the file contents. 
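Pulling the points above together, a WSGI application wanting to give
mod_wsgi the best chance of using the optimised path, while still behaving
consistently on other WSGI hosting mechanisms, might open the file in binary
mode, calculate the 'Content-Length' itself and only fall back to a plain
chunked iterator where 'wsgi.file_wrapper' is not provided. This is only a
sketch and the file path is purely illustrative::

    import os

    def application(environ, start_response):
        filelike = open('/usr/share/dict/words', 'rb')
        block_size = 4096

        # Calculate the content length from the current file position to
        # the end of the file, rather than relying on the WSGI adapter to
        # add the header, so behaviour is the same on all platforms.
        length = os.fstat(filelike.fileno()).st_size - filelike.tell()

        response_headers = [('Content-Type', 'text/plain'),
                            ('Content-Length', str(length))]
        start_response('200 OK', response_headers)

        if 'wsgi.file_wrapper' in environ:
            return environ['wsgi.file_wrapper'](filelike, block_size)

        return iter(lambda: filelike.read(block_size), b'')

Because the file object provides both 'fileno()' and 'tell()' and is opened
in binary mode, mod_wsgi is able to use the optimised path where the hosting
arrangement allows it, and the explicit 'Content-Length' keeps the response
consistent when it does not.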
Note that this all presumes that the iterable object returned by 'wsgi.file_wrapper' is actually passed back to mod_wsgi and is not consumed by a WSGI middleware. For example, a WSGI middleware which compresses the response content would consume the response content and modify it with a different iterable object being returned. In this case there is no chance for optimisations to be used for returning the file contents. This problem isn't restricted though to just where the response content is modified in some way and also extends to any WSGI middleware that wants to replace the 'close()' method to perform some cleanup actions at the end of a request. This is because in order to interject the cleanup actions triggered on the 'close()' method of the iterable object it has to replace the existing iterable object with another which wraps the first, with the outer providing its own 'close()' method. An example of a middleware which replaces the 'close()' method in this way can be found in :doc:`../user-guides/registering-cleanup-code`. It is thus quite easy for a WSGI application stack to inadvertantly defeat completely any attempts to return file contents in an optimised way using the 'wsgi.file_wrapper' extension of WSGI. As such, attempts should always be used instead to make use of a real web server, whether that be a separate web server, or in the case of mod_wsgi the underlying Apache web server. Where necessary, features of web servers or proxies such as 'X-Accel-Redirect', 'X-Sendfile' or other special purpose headers could be used. If using mod_wsgi daemon mode and using mod_wsgi version 3.0 or later, the 'Location' response header can also be used. mod_wsgi-5.0.0/docs/user-guides/frequently-asked-questions.rst000066400000000000000000000263671452636074700246110ustar00rootroot00000000000000========================== Frequently Asked Questions ========================== Apache Process Crashes ---------------------- *Q*: When the mod_wsgi module is initially being loaded by Apache, why does the Apache server processes crash with a 'segmentation fault'? *A*: This is nearly always caused due to mod_python also being loaded by Apache at the same time as mod_wsgi and the Python installation not providing a shared library, or mod_python having originally being built against a static Python library. This is especially a problem with older Linux distributions before they started shipping with Python as a shared library. Further information on these problems can be found in various sections of [:doc:`../user-guides/installation-issues`]. *Q*: When the first request is made against a WSGI application, why does the Apache server process handling the request crash with a 'segmentation fault'? *A*: This is nearly always caused due to a shared library version conflict. That is, Apache or some Apache module is linked against a different version of a library than that which is being used by a particular Python module that the WSGI application makes use of. The most common culprits are the expat and MySQL libraries, but it can also occur with other shared libraries. Another cause of a process crash only upon the first request can be a third party C extension module for Python which has not been implemented so as to work within a secondary Python sub interpreter. The Python bindings for Subversion are a particular example, with the Python module only working correctly if the WSGI application is forced to run within the first interpreter instance created by Python. 
Further information on these problems can be found in various sections of :doc:`../user-guides/application-issues`. The problems with the expat library are also gone into in more detail in :doc:`../user-guides/issues-with-expat-library`. *Q*: Why am I seeing the error message 'premature end of script headers' in the Apache error logs. *A*: If using daemon mode, this is a symptom of the mod_wsgi daemon process crashing when handling a request. You would probably also see the message 'segmentation fault'. See answer for question about 'segmentation fault' above. This error message can also occur where you haven't configured Apache correctly and your WSGI script file is being executed as a CGI script instead. HTTP Error Responses -------------------- *Q*: When I try to use mod_wsgi daemon mode I get the error response '503 Service Temporarily Unavailable'. *A*: The standard Apache runtime directory has restricted access and the Apache child process cannot access the daemon process sockets. You will need to use the WSGISocketPrefix directive to specify an alternative location for storing of runtime files such as sockets. For further information see section 'Location Of UNIX Sockets' of [:doc:`../user-guides/configuration-issues`]. *Q*: I am getting a HTTP 500 error response and I can't find any error in the Apache error logs. *A*: Some users of mod_wsgi 1.3/2.0 and older minor revisions, are finding that mod_wsgi error messages are going missing, or ending up in the main Apache error log file rather than a virtual host specific error log file. Specifically, this is occurring when Apache ErrorLog directive is being used inside of a VirtualHost container. It is not known exactly what operating system setup and/or Apache configuration is the trigger for this problem. To avoid the problem, use a newer version of mod_wsgi. HTTP Error Log Messages ----------------------- *Q*: Why do I get the error 'IOError: client connection closed' appearing in the error logs? *A*: This occurs when the HTTP client making the request closes the connection before the complete response for a request has been written. This can occur where a user force reloads a web page before it had been completely displayed. It can also occur when using benchmarking tools such as 'ab' as they will over commit on the number of requests they make when doing concurrent requests, killing off any extra requests once the required number has been reached. In general this error message can be ignored. Application Reloading --------------------- *Q*: Do I have to restart Apache every time I make a change to the Python code for my WSGI application? *A*: If your WSGI application is contained totally within the WSGI script file and it is that file that you are changing, then no you don't. In this case the WSGI script file will be automatically reloaded when a change is made provided that script reloading hasn't been disabled. If the code you are changing lies outside of the WSGI script file then what you may need to do will depend on how mod_wsgi is being used. If embedded mode of mod_wsgi is being used, the only option is to restart Apache. You could set Apache configuration directive MaxRequestsPerChild to 1 to force a reload of the application on every request, but this is not recommended because it will perform as bad as or as worse as CGI and will also affect serving up of static files and other applications being hosted by the same Apache instance. 
If using daemon mode with a single process you can send a SIGINT signal to the daemon process using the 'kill' command, or have the application send the signal to itself when a specific URL is triggered. If using daemon mode, with any number of processes, and the process reload mechanism of mod_wsgi 2.0 has been enabled, then all you need to do is touch the WSGI script file, thereby updating its modification time, and the daemon processes will automatically shutdown and restart the next time they receive a request. Use of daemon mode and the process reload mechanism is the preferred mechanism for handling automatic reloading of code after changes. More details on how source code reloading works with mod_wsgi can be found in :doc:`../user-guides/reloading-source-code`. *Q*: Why do requests against my application seem to take forever, but then after a bit they all run much quicker? *A*: This is because mod_wsgi by default performs lazy loading of any application. That is, an application is only loaded the first time that a request arrives which targets that WSGI application. This means that those initial requests will incur the overhead of loading all the application code and performing any startup initialisation. This startup overhead can appear to be quite significant, especially if using Apache prefork MPM and embedded mode. This is because the startup cost is incurred for each process and with prefork MPM there are typically a lot more processes that if using worker MPM or mod_wsgi daemon mode. Thus, as many requests as there are processes will run slowly and everything will only run full speed once code has all been loaded. Note that if recycling of Apache child processes or mod_wsgi daemon processes after a set number of requests is enabled, or for embedded mode Apache decides itself to reap any of the child processes, then you can periodically see these delayed requests occurring. Some number of the benchmarks for mod_wsgi which have been posted do not take into mind these start up costs and wrongly try to compare the results to other systems such as fastcgi or proxy based systems where the application code would be preloaded by default. As a result mod_wsgi is painted in a worse light than is reality. If mod_wsgi is configured correctly the results would be better than is shown by those benchmarks. For some cases, such as when WSGIScriptAlias is being used, it is actually possible to preload the application code when the processes first starts, rather than when the first request arrives. To preload an application see the WSGIImportScript directive. By preloading the application code you would not normally see delays in requests being handled. The only exception to this would be when running a single process under mod_wsgi daemon mode and the process is being restarted when a maximum number of requests arrives or explicitly via one of the means to trigger reloading of application code. Delays here can be avoided by running at least two processes in the daemon process group. This is because when one process is restarting, the others can handle the requests. Execution Environment --------------------- *Q*: Why do I get the error 'IOError: sys.stdout access restricted by mod_wsgi'? *A*: A portable WSGI application or application component should not output anything to standard output. This is because some WSGI hosting mechanisms use standard output to communicate with the web server. 
If a WSGI application outputs anything to standard output it will thus potentially interleave with the response sent back to the client. To promote portability of WSGI applications, mod_wsgi by default restricts direct use of 'sys.stdout' and 'sys.stdin'. Because the 'print' statement defaults to outputing text to 'sys.stdout', using 'print' for debugging purposes can cause this error. For more details about this issue, including how applications should do logging and how to disable this restriction see section 'Writing To Standard Output' in :doc:`../user-guides/application-issues` and section 'Apache Error Log Files' in :doc:`../user-guides/debugging-techniques`. *Q*: Can mod_wsgi be used with Python virtual environments created using Ian Bicking's 'virtualenv' package? *A*: Yes. For more details see :doc:`../user-guides/virtual-environments`. Access Control Mechanisms ------------------------- *Q*: Why are client user credentials not being passed through to the WSGI application in the 'HTTP_AUTHORIZATION' variable of the WSGI environment? *A*: User credentials are not passed by default as doing so is insecure and could expose a users password to WSGI applications which shouldn't be permitted to see it. Such a situation might occur within a corporate setting where HTTP authentication mechanisms were used to control access to a corporate web server but it was possible for users to provide their own web pages. The last thing a system administator will want is normal users being able to see other users passwords. As a result, the passing of HTTP authentication credentials must be explicitly enabled by the web server administrator. This can only be done using directives placed in the main Apache confguration file. For further information see :doc:`../user-guides/access-control-mechanisms` and the documentation for the WSGIPassAuthorization directive. *Q*: Is there a way of having a WSGI application provide user authentication for resources outside of the application such as static files, CGI scripts or even a distinct application? In other words, something akin to being able to define access, authentication and authorisation handlers in mod_python. *A*: Provided you are using Apache 2.0 or later, version 2.0 of mod_wsgi provides support for hooking into the Apache access, authentication and authorisation handler phases. This doesn't allow full control of how the Apache handler is implemented, but does allow control over how user credentials are validated, determination of what groups a user is a member of and whether specific hosts are allowed access. This is generally more than sufficient and makes the task somewhat simpler than needing to implement a full handler like in mod_python as Apache and mod_wsgi do all the hard work. For further information see :doc:`../user-guides/access-control-mechanisms`. mod_wsgi-5.0.0/docs/user-guides/installation-issues.rst000066400000000000000000000601071452636074700232760ustar00rootroot00000000000000=================== Installation Issues =================== Although mod_wsgi is not a large package in itself, it depends on both Apache and Python to get it compiled and installed. Because Apache and Python are complicated systems in their own right, various problems can come up during installation of mod_wsgi. These problems can arise for various reasons, including an incomplete or suboptimal Python installation or presence of multiple Python versions. 
The purpose of this document is to capture all the known problems that can arise regarding installation, including workarounds if available. If you are having a problem which doesn't seem to be covered by this document, also make sure you see :doc:`../user-guides/configuration-issues` and :doc:`../user-guides/application-issues`. Missing Python Header Files --------------------------- In order to compile mod_wsgi from source code you must have installed the full Python distribution, including header files. On a Linux distribution where binary Python packages are split into a runtime package and a developer package, the developer package is often not installed by default. This means that you will be missing the header files required to compile mod_wsgi from source code. An example of the error messages you will see if the developer package is not installed are:: mod_wsgi.c:113:20: error: Python.h: No such file or directory mod_wsgi.c:114:21: error: compile.h: No such file or directory mod_wsgi.c:115:18: error: node.h: No such file or directory mod_wsgi.c:116:20: error: osdefs.h: No such file or directory mod_wsgi.c:119:2: error: #error Sorry, mod_wsgi requires at least Python 2.3.0. mod_wsgi.c:123:2: error: #error Sorry, mod_wsgi requires that Python supporting thread. To remedy the problem, install the developer package for Python corresponding to the Python runtime package you have installed. What the name of the developer package is can vary from one Linux distribution to another. Normally it has the same name as the Python runtime package with ``-dev`` appended to the package name. You will need to lookup up list of available packages in your packaging system to determine actual name of package to install. Lack Of Python Shared Library ----------------------------- In the optimal case, when mod_wsgi is compiled the resulting Apache module should be less than 250 Kbytes in size. If this is not the case and the module is over 1MB in size, it indicates that the version of Python being used was not originally configured so as to produce a Python shared library and is instead only producing a static library. Although the existance of only a static library for Python doesn't normally cause compilation of mod_wsgi to fail, it does mean that when 'libtool' is used to generate the mod_wsgi Apache module, that it has to embed the actual static library objects into the Apache module instead of it being used as a shared library. The consequences of this are that when the mod_wsgi Apache module is loaded by Apache, the operating system dynamic linker has to perform address relocations on the Python library component of the mod_wsgi Apache module. Because these relocations require memory to be modified, the full Python library then becomes private memory to the process and not shared. On a Linux system this need to perform the address relocations at runtime will immediately cause each Apache child process to bloat out in size by between 1 and 2MB. On a Solaris system, depending on which compiler is being used and which options, the amount of additional memory used can be 5MB or more. To determine whether the compiled mod_wsgi module is making use of a shared library for Python, many UNIX systems provide the 'ldd' program. 
The output from running this on the 'mod_wsgi.so' file would be something like:: $ ldd mod_wsgi.so linux-vdso.so.1 => (0x00007fffeb3fe000) libpython2.5.so.1.0 => /usr/local/lib/libpython2.5.so.1.0 (0x00002adebf94d000) libpthread.so.0 => /lib/libpthread.so.0 (0x00002adebfcba000) libdl.so.2 => /lib/libdl.so.2 (0x00002adebfed6000) libutil.so.1 => /lib/libutil.so.1 (0x00002adec00da000) libc.so.6 => /lib/libc.so.6 (0x00002adec02dd000) libm.so.6 => /lib/libm.so.6 (0x00002adec0635000) /lib64/ld-linux-x86-64.so.2 (0x0000555555554000) Note how there is a dependency listed on the '.so' file for Python. If this is not present then mod_wsgi is using a static Python library. Although mod_wsgi will still work when compiled against a version of Python which only provides a static library, you are highly encouraged to ensure that your Python installation has been configured and compiled with the ``--enable-shared`` option to enable the production and use of a shared library for Python. If rebuilding Python to generate a shared library, do make sure that the Python shared library, or a symlink to it appears in the Python 'config' directory of your Python installation. If the shared library doesn't appear here next to the static version of the library, 'libtool' will not be able to find it and will still use the static version of the library. It is understood that the Python build process may not actually do this, so you may have to do it by hand. To check, go to the Python 'config' directory of your Python installation and do a directory listing:: $ ls -las 4 drwxr-sr-x 2 root staff 4096 2007-11-29 23:26 . 20 drwxr-sr-x 21 root staff 20480 2007-11-29 23:26 .. 4 -rw-r--r-- 1 root staff 2078 2007-11-29 23:26 config.c 4 -rw-r--r-- 1 root staff 1446 2007-11-29 23:26 config.c.in 8 -rwxr-xr-x 1 root staff 7122 2007-11-29 23:26 install-sh 7664 -rw-r--r-- 1 root staff 7833936 2007-11-29 23:26 libpython2.5.a 40 -rw-r--r-- 1 root staff 38327 2007-11-29 23:26 Makefile 8 -rwxr-xr-x 1 root staff 7430 2007-11-29 23:26 makesetup 8 -rw-r--r-- 1 root staff 6456 2007-11-29 23:26 python.o 20 -rw-r--r-- 1 root staff 17862 2007-11-29 23:26 Setup 4 -rw-r--r-- 1 root staff 368 2007-11-29 23:26 Setup.config 4 -rw-r--r-- 1 root staff 41 2007-11-29 23:26 Setup.local If you only see a '.a' file for Python library, then either Python wasn't installed with the shared library, or the shared library was placed elsewhere. What appears to normally happen is that the shared library is actually placed in the 'lib' directory two levels above the Python 'config' directory. In that case you need to create a symlink in the 'config' directory to where the shared library is actually installed:: $ ln -s ../../libpython2.5.so . Apart from the additional memory consumption when using a static library, it is also preferable that a shared library be used where it is possible that you will upgrade your Python installation to a newer patch revision. This is because if you upgrade Python to a newer patch revision but do not recompile mod_wsgi, mod_wsgi will still incorporate the older static Python library and will not pick up any changes from the newer version of Python. This will result in undefined behaviour as the Python library code may not match up with the Python code modules or external modules in the Python installation. If a Python shared library is used, this will not be a problem. 
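Before rebuilding anything, it can also be worth confirming from Python itself whether it was configured with ``--enable-shared``. The following sketch uses the 'sysconfig' module available in more recent Python versions; for older versions the same build variables can be queried via 'distutils.sysconfig'::

    import sysconfig

    # A true value indicates Python was configured with --enable-shared.
    print(sysconfig.get_config_var('Py_ENABLE_SHARED'))

    # Directory and name of the Python library that was built.
    print(sysconfig.get_config_var('LIBDIR'))
    print(sysconfig.get_config_var('LDLIBRARY'))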
Multiple Python Versions ------------------------ Where there are multiple versions of Python installed on a system and it is necessary to ensure that a specific version is used, the ``--with-python`` option can be supplied to 'configure' when installing mod_wsgi:: ./configure --with-python=/usr/local/bin/python2.5 This may be necessary where for example the default Python version supplied with the system is an older version of Python. More specifically, it would be required where it isn't possible to replace the older version of Python outright due to operating system management scripts being dependent on the older version of Python and not working with newer versions of Python. Where multiple versions of Python are present and are installed under the same directory, this should generally be all that is required. If however the newer version of Python you wish to use is in a different location, for example under '/usr/local', it is possible that when Apache is started that it will not be able find the Python library files for the version of Python you wish to use. This can occur because the Python library when initialised determines where the Python installation resides by looking through directories specified in the 'PATH' environment variable for the 'python' executable and using that as base location for calculating installation prefix. Specifically, the directory above the directory containing the 'python' executable is taken as being the installation prefix. When the Python which should be used is installed in a non standard location, then that 'bin' directory is unlikely to be in the 'PATH' used by Apache when it is started. As such, rather than find '/usr/local/bin/python' it would instead find '/usr/bin/python' and so use '/usr' rather than the directory '/usr/local/' as the installation prefix. When this occurs, if under '/usr' there was no Python installation of the same version number as Python which should be used, then normally:: 'import site' failed; use -v for traceback would appear in the Apache error log file when Python is first being initialised within Apache. Any attempt to make a request against a WSGI application would also result in errors as no modules at all except for inbuilt modules, would be able to be found when an attempt is made to import them. Alternatively, if there was a Python installation of the same version, albeit not the desired installation, then there may be no obvious issues on startup, but at run time you may find modules cannot be found when being imported as they are installed into a different location than that which was being used. Even if equivalent module is found, it could fail at run time in subtle ways if the two Python installations are of same version but at the different locations are compiled in different ways, or if it is a third party module and they are different versions and so API is different. In this situation it will be necessary to explicitly tell mod_wsgi where the Python executable for the version of Python which should be used, is located. 
This can be done using the WSGIPythonHome directive:: WSGIPythonHome /usr/local The value given to the WSGIPythonHome directive should be a normalised path corresponding to that defined by the Python {{{sys.prefix}}} variable for the version of Python being used and passed to the ``--with-python`` option when configuring mod_wsgi:: >>> import sys >>> sys.prefix '/usr/local' An alternative, although less desirable way of achieving this is to set the 'PATH' environment variable in the startup scripts for Apache. For a standard Apache installation using ASF structure, this can be done by editing the 'envvars' file in same directory as the Apache executable and adding the alternate bin directory to the head of the 'PATH':: PATH=/usr/local/bin:$PATH export PATH If there are any concerns over what Python installation directory is being used and you want to verify what it is, then use a small test WSGI script which outputs the values of 'sys.prefix' and 'sys.path'. For example:: import sys def application(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) print >> sys.stderr, 'sys.prefix = %s' % repr(sys.prefix) print >> sys.stderr, 'sys.path = %s' % repr(sys.path) return [output] Using ModPython and ModWsgi --------------------------- Using mod_python and mod_wsgi together is no longer supported and recent versions of mod_wsgi will cause the startup of Apache to be aborted if both are loaded at the same time. Python Patch Level Mismatch --------------------------- If the Python package is upgraded to a newer patch level revision, one will likely see the following warning messages in the Apache error log when Python is being initialised:: mod_wsgi: Compiled for Python/2.4.1. mod_wsgi: Runtime using Python/2.4.2. The warning is indicating that a newer version of Python is now being used than what mod_wsgi was originally compiled for. This would generally not be a problem provided that both versions of Python were originally installed with the ``--enable-shared`` option supplied to 'configure'. If this option is used then the Python library will be linked in dynamically at runtime and so an upgrade to the Python version will be automatically used. If ``--enable-shared`` was however not used and the Python library is therefore embedded into the actual mod_wsgi Apache module, then there is a risk of undefined behaviour. This is because the version of the Python library embedded into the mod_wsgi Apache module will be older than the corresponding Python code modules and extension modules being used from the Python library directory. Thus, if a shared library is not being used for Python it will be necessary to rebuild mod_wsgi against the newer patch level revision of mod_wsgi and reinstall it. 
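To confirm which Python runtime is actually being picked up after such an upgrade, a variant of the earlier test script can be used which writes the runtime version details to the Apache error log via 'wsgi.errors' rather than standard error::

    import sys

    def application(environ, start_response):
        environ['wsgi.errors'].write('sys.version = %r\n' % sys.version)
        environ['wsgi.errors'].write('sys.prefix = %r\n' % sys.prefix)

        status = '200 OK'
        output = b'Hello World!'
        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response(status, response_headers)
        return [output]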
Mixing 32 Bit And 64 Bit Packages --------------------------------- When attempting to compile mod_wsgi on a Linux system using an X86 64 bit processor, the following error message can arise:: /bin/sh /usr/lib64/apr/build/libtool --silent --mode=link gcc -o \ mod_wsgi.la -I/usr/local/include/python2.4 -DNDEBUG -rpath \ /usr/lib64/httpd/modules -module -avoid-version mod_wsgi.lo \ -L/usr/local/lib/python2.4/config -lpython2.4 -lpthread -ldl -lutil /usr/bin/ld: /usr/local/lib/python2.4/config/ libpython2.4.a(abstract.o): relocation R_X86_64_32 against `a local symbol' can not be used when making a shared object; recompile with -fPIC /usr/local/lib/python2.4/config/libpython2.4.a: could not read symbols: Bad value collect2: ld returned 1 exit status apxs:Error: Command failed with rc=65536 . make: *** [mod_wsgi.la] Error 1 This error is believed to be result of the version of Python being used having been originally compiled for the generic X86 32 bit architecture whereas mod_wsgi is being compiled for X86 64 bit architecture. The actual error arises in this case because 'libtool' would appear to be unable to generate a dynamically loadable module for the X86 64 bit architecture from a X86 32 bit static library. Alternatively, the problem is due to 'libtool' on this platform not being able to create a loadable module from a X86 64 bit static library in all cases. If the first issue, the only solution to this problem is to recompile Python for the X86 64 bit architecture. When doing this, it is preferable, and may actually be necessary, to ensure that the ``--enable-shared`` option is provided to the 'configure' script for Python when it is being compiled and installed. If rebuilding Python to generate a shared library, do make sure that the Python shared library, or a symlink to it appears in the Python 'config' directory of your Python installation. If the shared library doesn't appear here next to the static version of the library, 'libtool' will not be able to find it and will still use the static version of the library. It is understood that the Python build process may not actually do this, so you may have to do it by hand. If the version of Python being used was compiled for X86 64 bit architecture and a shared library does exist, but not in the 'config' directory, then adding the missing symlink may be all that is required. Unable To Find Python Shared Library ------------------------------------ When mod_wsgi is built against a version of Python providing a shared library, the Python shared library must be in a directory which is searched for libraries at runtime by Apache. If this isn't the case the Python shared library will not be able to be found when loading the mod_wsgi module in to Apache. The error in this situation will be similar to:: error while loading shared libraries: libpython2.4.so.1.0: \ cannot open shared object file: No such file or directory A number of alternatives exist for resolving this problem. The preferred solution would be to copy the Python shared library into a directory which is searched for dynamic libraries at run time. Directories which would generally always be searched are '/lib' and '/usr/lib'. For some systems the directory '/usr/local/lib' may also be searched, but this may depend on the directory having been explicitly added to the approrpiate system file listing the directories to be searched. The name and location of this configuration file differs between platforms. On Linux systems it is often called '/etc/ld.so.conf'. 
If changes are made to the file on Linux systems the 'ldconfig' command also needs to be run. See the manual page for 'ldconfig' for further details. Rather than changing the system wide list of directories to search for shared libraries, additional search directories can be specified just for Apache. On Linux this would entail setting the 'LD_LIBRARY_PATH' environment variable to include the directory where the Python shared library is installed. The setting and exporting of the environment variable would be placed in the Apache 'envvars' file, for a standard Apache installation, located in the same directory as the Apache web server executable. If using a customised Apache installation, such as on Red Hat, the 'envvars' file may not exist. In this case you would need to add this into the actual startup script for Apache. For Red Hat this is '/etc/sysconfig/httpd'. A final alternative on some systems is to embed the directory to search for the Python shared library into the mod_wsgi Apache module itself. On Linux systems this can be done by setting the environment variable 'LD_RUN_PATH' to the directory containing the Python shared library when initially building the mod_wsgi source code. GNU C Stack Smashing Extensions ------------------------------- Various Linux distributions are starting to ship with a version of the GNU C compiler which incorporates an extension which implements protection for stack-smashing. In some instances where such a compiler is used to build mod_wsgi, the module is unable to then be loaded by Apache. The specific problem is that the symbol ``__stack_chk_fail_local`` is being flagged as undefined:: $ invoke-rc.d apache2 reload apache2: Syntax error on line 190 of /etc/apache2/apache2.conf: \ Cannot load /usr/lib/apache2/modules/mod_wsgi.so into server: \ /usr/lib/apache2/modules/mod_wsgi.so: \ undefined symbol: __stack_chk_fail_local failed! invoke-rc.d: initscript apache2, action "reload" failed. The exact reason for this is not known but it is speculated to be caused when the system libraries or Apache itself has not been compiled with a version of the GNU C compiler incorporating the extension. To workaround the problem, modify the 'Makefile' for mod_wsgi and change the value of 'CFLAGS' to:: CFLAGS = -Wc,-fno-stack-protector Perform a 'clean' in the directory and then rebuild and reinstall the mod_wsgi module. Undefined 'forkpty' On Fedora 7 ------------------------------- On Fedora 7, the provided binary version of Apache is not linked against the 'libutil' system library. This causes problems when Python is initialised and the 'posix' module imported for the first time. This is because the 'posix' module requires functions from 'libutil' but they will not be present. The error encountered would be similar to:: httpd: Syntax error on line 54 of /etc/httpd/conf/httpd.conf: Cannot \ load /etc/httpd/modules/mod_wsgi.so into server: \ /etc/httpd/modules/mod_wsgi.so: undefined symbol: forkpty This problem can be fixed by adding ``-lutil`` to the list of libraries to link mod_wsgi against when it is being built. This can be done by adding ``-lutil`` to the 'LDLIBS' variable in the mod_wsgi 'Makefile' after having run 'configure'. 
An alternative method which may work is to edit the 'envvars' file, if it exists and is used, located in the same directory as the Apache 'httpd' executable, or the Apache startup script, and add:: LD_PRELOAD=/usr/lib/libutil.so export LD_PRELOAD Missing Include Files On SUSE ----------------------------- SUSE Linux follows a slightly different convention to other Linux distributions and has split their Apache "dev" packages in a way as to allow packages for different Apache MPMs to be installed at the same time. Although the resultant mod_wsgi module isn't strictly MPM specific, it does indirectly include the MPM specific header file "mpm.h". Because the header file is MPM specific, when configuring mod_wsgi, it is necessary to reference the version of "apxs" from the MPM specific "dev" package else the "mpm.h" header file will not be found at compile time. These errors are:: In file included from mod_wsgi.c:4882: /usr/include/apache2/mpm_common.h:46:17: error: mpm.h: No such file or directory ... mod_wsgi.c: In function 'wsgi_set_accept_mutex': mod_wsgi.c:5200: error: 'ap_accept_lock_mech' undeclared (first use in this function) mod_wsgi.c:5200: error: (Each undeclared identifier is reported only once mod_wsgi.c:5200: error: for each function it appears in.) apxs:Error: Command failed with rc=65536 To avoid this problem, when configuring mod_wsgi, it is necessary to use the ``--with-apxs`` option to designate that either "apxs2-worker" or "apxs2-prefork" should be used. Thus:: ./configure --with-apxs=/usr/sbin/apxs2-worker or:: ./configure --with-apxs=/usr/sbin/apxs2-prefork Although which is used is not important, since mod_wsgi when compiled isn't specific to either, best to use that which corresponds to the version of Apache being used. Apache Maintainer Mode ---------------------- When building mod_wsgi from source code, on UNIX systems there should be minimal if no compiler warnings. If you see a lot of warnings, especially complaints about ``ap_strstr``, then your Apache installation has been configured for maintainer mode:: mod_wsgi.c: In function 'wsgi_process_group': mod_wsgi.c:722: warning: passing argument 1 of 'ap_strstr' discards qualifiers from pointer target type mod_wsgi.c:740: warning: passing argument 1 of 'ap_strstr' discards qualifiers from pointer target type Specifically, whoever built the version of Apache being used supplied the option ``--enable-maintainer-mode`` when configuring Apache prior to installation. You would be able to tell at the time of compiling mod_wsgi if this has been done as the option ``-DAP_DEBUG`` would be supplied to the compiler when mod_wsgi source code is compiled. These warnings can be ignored, but in general you shouldn't run Apache in maintainer mode. A further reason for not running Apache in maintainer mode is that certain situations can cause Apache to fail an internal assertion check when using mod_wsgi. The specific error message is:: [crit] file http_filters.c, line 346, assertion "readbytes > 0" failed [notice] child pid 18551 exit signal Aborted (6) This occurs because the Apache code has an overly agressive assertion check, which is arguably incorrect. This particular assertion check will fail when a zero length read is perform on the Apache 'HTTP_IN' input filter. This scenario can arise in mod_wsgi due to a workaround in place to get around a bug in Apache related to generation of '100-continue' response. 
The Apache bug is described in: * https://issues.apache.org/bugzilla/show_bug.cgi?id=38014 The scenario can also be triggered as a result of a WSGI application performing a zero length read on 'wsgi.input'. Changes to mod_wsgi are being investigated to see if zero length reads can be ignored, but due to the workaround for the bug, this would only be able to be done for Apache 2.2.8 or later. The prefered solution is simply not to use Apache with maintainer mode enabled for systems where you are running real code. Unfortunately, it looks like some Linux distributions, eg. SUSE, accidentally released Apache binary packages with this mode enabled by default. You should update to a Apache binary package that doesn't have the mode enabled, or compile from source code. mod_wsgi-5.0.0/docs/user-guides/installation-on-macosx.rst000066400000000000000000000162061452636074700236700ustar00rootroot00000000000000======================= Installation On MacOS X ======================= If you are using MacOS X, mod_wsgi can be compiled from source code against the standard versions of Python and Apache httpd server supplied with the operating system. To do this though you will first need to have installed the Xcode command line tools. The Xcode command line tools package provides a C compiler, along with header files and support tools for the Apache httpd server. If you have already set up your system so as to be able to install additional Python packages which include C extensions, you likely will already have the Xcode command line tools. Install Xcode command line tools -------------------------------- To install the Xcode command line tools you should run the command:: xcode-select --install If this gives you back the error message:: xcode-select: error: command line tools are already installed, use "Software Update" to install updates then the tools have already been installed. As noted by the warning message, do make sure you have run a system software update to ensure that you have the latest versions of these tools. If you do not already have the Xcode command line tools installed, running that ``xcode-select`` command should result in you being prompted to install them. This may ask you to provide the details of an administrator account along with the password for that account. Note that it is not necessary to install the whole of the Xcode developer application from the MacOS X App Store, only the command line tools using ``xcode-select``. If you have installed the Xcode developer application, still ensure that the command line tools are installed and ensure you have run the system software update. Configuring and building mod_wsgi --------------------------------- If you are using the Python and Apache httpd server packages provided with the operating system, all you need to do to configure the mod_wsgi source code before building it is to run in the mod_wsgi source code directory:: ./configure This should yield output similar to:: checking for apxs2... no checking for apxs... /usr/sbin/apxs checking for gcc... gcc checking whether the C compiler works... yes checking for C compiler default output file name... a.out checking for suffix of executables... checking whether we are cross compiling... no checking for suffix of object files... o checking whether we are using the GNU C compiler... yes checking whether gcc accepts -g... yes checking for gcc option to accept ISO C89... none needed checking for prctl... no checking Apache version... 2.4.18 checking for python... 
/usr/bin/python configure: creating ./config.status config.status: creating Makefile The ``configure`` script should show that it has detected ``apxs`` as being located at ``/usr/sbin/apxs`` and ``python`` as being at ``/usr/bin/python``. If you get different values for ``apxs`` and ``python`` then it means that you likely have a separate installation of Python or the Apache httpd server installed on your system. If this is the case, to ensure that you use the versions of Python and Apache httpd server provided with the operating system instead use the command:: ./configure --with-python=/usr/bin/python --with-apxs=/usr/sbin/apxs Once you have configured the source code by running ``configure``, you can build mod_wsgi using the command:: make This will compile the mod_wsgi source code and produce a single ``mod_wsgi.so`` file which then needs to be installed into a common location so that the Apache httpd server can use it. Installing the mod_wsgi module ------------------------------ What you need to do to install the mod_wsgi module depends on which version of MacOS X you are using. For the Apache httpd server provided by the operating system, the directory ``/usr/libexec/apache2`` is used to store the compiled modules. Prior to MacOS X El Capitan (10.11) this directory was writable and the mod_wsgi module could be installed here along with all the default modules. With the introduction of the System Integrity Protection (SIP_) feature in MacOS X El Capitan this directory is not writable, not even to the root user. Because of this, if you are using a version of MacOS X prior to MacOS X El Capitan (10.11) you can use the command:: sudo make install to install the mod_wsgi module. As ``sudo`` is being run, you will be prompted for your password. The module will be installed into the directory ``/usr/libexec/apache2``. Within the Apache httpd server configuration file you can then use the standard ``LoadModule`` line of:: LoadModule wsgi_module libexec/apache2/mod_wsgi.so If however you are using MacOS X El Capitan (10.11) or later, the mod_wsgi module will need to be installed into a different location. If you don't and try to run just ``sudo make install``, it will fail with the output:: ./apxs -i -S LIBEXECDIR=/usr/libexec/apache2 -n 'mod_wsgi' src/server/mod_wsgi.la /usr/share/httpd/build/instdso.sh SH_LIBTOOL='./libtool' src/server/mod_wsgi.la /usr/libexec/apache2 ./libtool --mode=install install src/server/mod_wsgi.la /usr/libexec/apache2/ libtool: install: install src/server/.libs/mod_wsgi.so /usr/libexec/apache2/mod_wsgi.so install: /usr/libexec/apache2/mod_wsgi.so: Operation not permitted apxs:Error: Command failed with rc=4653056 . make: *** [install] Error 1 The directory you use to install the mod_wsgi module is up to you, but one suggested option is that you use the directory ``/usr/local/httpd/modules``. Just ensure that this isn't already used by a separate installation of the Apache httpd server. 
To install the mod_wsgi module into this directory use the command:: sudo make install LIBEXECDIR=/usr/local/httpd/modules The output from the command will be similar to:: mkdir -p /usr/local/httpd/modules ./apxs -i -S LIBEXECDIR=/usr/local/httpd/modules -n 'mod_wsgi' src/server/mod_wsgi.la /usr/share/httpd/build/instdso.sh SH_LIBTOOL='./libtool' src/server/mod_wsgi.la /usr/local/httpd/modules ./libtool --mode=install install src/server/mod_wsgi.la /usr/local/httpd/modules/ libtool: install: install src/server/.libs/mod_wsgi.so /usr/local/httpd/modules/mod_wsgi.so libtool: install: install src/server/.libs/mod_wsgi.lai /usr/local/httpd/modules/mod_wsgi.la libtool: install: install src/server/.libs/mod_wsgi.a /usr/local/httpd/modules/mod_wsgi.a libtool: install: chmod 644 /usr/local/httpd/modules/mod_wsgi.a libtool: install: ranlib /usr/local/httpd/modules/mod_wsgi.a libtool: install: warning: remember to run `libtool --finish /usr/libexec/apache2' chmod 755 /usr/local/httpd/modules/mod_wsgi.so The warning about needing to run ``libtool --finish`` can be ignored as it is not required for everything to work. With the mod_wsgi module installed in this location, the ``LoadModule`` line in the Apache httpd configuration file should be:: LoadModule wsgi_module /usr/local/httpd/modules/mod_wsgi.so Normal steps to then configure the Apache httpd server and mod_wsgi for your specific WSGI application would then be followed. .. _SIP: https://en.wikipedia.org/wiki/System_Integrity_Protection mod_wsgi-5.0.0/docs/user-guides/issues-with-expat-library.rst000066400000000000000000000250401452636074700243260ustar00rootroot00000000000000========================= Issues With Expat Library ========================= This article describes problems caused due to mismatches in the version of the "expat" library embedded into Python and that linked into Apache. Where incompatible versions are used, Apache can crash as soon as any Python code module imports the "pyexpat" module. Note that this only applies to Python versions prior to Python 2.5. From Python 2.5 onwards, the copy of the "expat" library bundled in with Python is name space prefixed, thereby avoid name clashes with an "expat" library which has previously been loaded. The Dreaded Segmentation Fault ------------------------------ When moving beyond creating simple WSGI applications to more complicated tasks, one can unexpectedly be confronted with Apache crashing. This generally manifests in no response being returned to the browser when a request is made. Upon further investigation of the Apache error log file, a message similar to the following message is found:: [notice] child pid 3238 exit signal Segmentation fault (11) The change which causes this is the explicit addition of code to import the Python module "pyexpat", or the importing of any Python module which indirectly makes use of the "pyexpat" module. Examples of other modules which make use of the "pyexpat" module are "xmlrpclib" and modules from the "PyXML" package. Nearly always, any module which in some way performs processing of XML data will be affected as most such modules rely on using the "pyexpat" module in some way. 
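As a quick initial check, the version of the "expat" library embedded in the Python installation can be printed from the command line, before involving Apache at all. The values shown in the comment are examples only::

    import pyexpat

    # For example 'expat_1.95.7' and (1, 95, 7) respectively.
    print(pyexpat.EXPAT_VERSION)
    print(pyexpat.version_info)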
Verifying Expat Is The Problem ------------------------------ To verify that the "pyexpat" module is the trigger for the problem, construct a simple WSGI application script file containing:: def application(environ, start_response): status = '200 OK' output = 'without expat\n' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] Verify that this handler works and the browser receives the response "without pyepxat". Now modify the handler such that the "pyexpat" module is being imported. Also change the response so that it is clear that the modified handler is being used:: import pyexpat def application(environ, start_response): status = '200 OK' output = 'with expat\n' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] Presuming that script reloading is enabled, if now upon a request being received by the WSGI application a succesful response of "with pyexpat" is received by the browser, it would generally indicate that the "pyexpat" module is not the problem after all. If however no response is received and the Apache error log records a "Segmentation fault" then the "pyexpat" module is the trigger. Mismatch In Versions Of Expat ----------------------------- Segmentation faults can occur with any application where different components of the application were compiled against different versions of a common library such as the "expat" library. The actual cause of the problem is generally a change in the API of the library, such as changed function prototypes, changed data types, or changes in structure layouts. In the case where mod_wsgi is being used, the different components are Apache and the "pyexpat" module from Python. Normally when different components of an application are built, they would be built against the same version of the library and such problems would not occur. In the case of the "pyexpat" module however, it is compiled against a distinct version of the "expat" library which is then embedded within the "pyexpat" module. At the same time, Apache will be built against the version of the "expat" library included with the operating system, or if not a standard part of the operating system, a version which is supplied with Apache. Thus if the version of the "expat" library embedded into the "pyexpat" module is different to that which Apache was compiled against, the potential for this problem will exist. Note though that there may not always be a problem. Whether there is or not will ultimately depend on what changes were made in the "expat" library between the releases of the different versions used. It is also possible how each library version was compiled could be a factor. Expat Version Used By Apache ---------------------------- To determine the version of the the "expat" library which is used by Apache, on Linux the "ldd" command can be used. Other operating systems also provide this program or will generally have some form of equivalent program. For example, on Mac OS X the command which is run is "otool -L". The purpose of these programs is to generate a list of all shared libraries that an application is linked against. To determine where the "expat" library being used by Apache is located, it is necessary to run the "ldd" program on the "httpd" program. On a Linux system, the "httpd" program is normally located in "/usr/sbin". 
Because we are only interested in the "expat" library, we can ignore anything but the reference to that library:: [grahamd@dscpl grahamd]$ ldd /usr/sbin/httpd | grep expat libexpat.so.0 => /usr/lib/libexpat.so.0 (0xb7e8c000) From this output it can be seen that the "httpd" program appears to be using "/usr/lib/libexpat.so.0". Although some operating systems embed in the name of the shared library versioning information, it does not generally indicate the true version of the code base which made up the library. To obtain this, it is necessary to extract the version information out of the library. For the "expat" library this can be determined by searching within the strings contained in the library for a version string starting with ``expat_``:: [grahamd@dscpl grahamd]$ strings /usr/lib/libexpat.so.0 | grep expat_ expat_1.95.8 The version of the "expat" library would therefore appear to be "1.95.8". Unfortunately though, many operating systems allow the library search path to be overridden at the point that a program is run using an environment variable such as "LD_LIBRARY_PATH" and it is quite possible that when Apache is run, the context in which it is run could result in it finding the "expat" library in a different location. To be absolutely sure, it is necessary to determine which "expat" library the running copy of Apache used. On Linux and many other operating systems, this can be determined using the "lsof" command. If this program doesn't exist, an alternate program which may be available is "ofiles". Either of these should be run against one of the active Apache processes. If Apache was originally started as root, the command will also need to be run as root:: [grahamd@dscpl grahamd]$ ps aux | grep http | head -3 root 3625 0.0 0.6 31068 12836 ? SN Sep25 0:08 /usr/sbin/httpd apache 24814 0.0 0.7 34196 15604 ? SN 04:11 0:00 /usr/sbin/httpd apache 24815 0.0 0.7 33924 15916 ? SN 04:11 0:00 /usr/sbin/httpd [grahamd@dscpl grahamd]$ sudo /usr/sbin/lsof -p 3625 | grep expat httpd 3625 root mem REG 253,0 123552 6409040 /usr/lib/libexpat.so.0.5.0 [grahamd@dscpl grahamd]$ strings /usr/lib/libexpat.so.0.5.0 | grep expat_ expat_1.95.8 Expat Version Used By Python ---------------------------- To determine the version of the "expat" library which is embedded in the Python "pyexpat" module, the module should be imported and the version information extracted from the module. This can be done by executing "python" on the command line and entering the necessary code directly:: [grahamd@dscpl grahamd]$ python Python 2.3.3 (#1, May 7 2004, 10:31:40) [GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import pyexpat >>> pyexpat.version_info (1, 95, 7) Combining Python And Apache --------------------------- When mod_wsgi is used from within Apache, although there is a version of the "expat" library embedded in the "pyexpat" module, it will effectively be ignored. This is because Apache has already loaded into memory at startup the version of the "expat" library which it is linked against. That this occurs can be seen by using the ability of Linux to forcibly preload a shared library into a program when run, even though that program wasn't linked against the library orginally. 
This is achieved using the "LD_PRELOAD" environment variable:: [grahamd@dscpl grahamd]$ LD_PRELOAD=/usr/lib/libexpat.so.0.5.0 python Python 2.3.3 (#1, May 7 2004, 10:31:40) [GCC 3.3.3 20040412 (Red Hat Linux 3.3.3-7)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import pyexpat >>> pyexpat.version_info (1, 95, 8) As can be seen, although the "pyexpat" module for this version of Python embedded version 1.95.7 of the "expat" library, when the same version of the "expat" library as was being used by Apache is forcibly loaded into the program at startup, the version information obtained from the "pyexpat" module now shows that version 1.95.8 of the "expat" library is being used. Luckily in this case, the patch level difference between the two versions of the "expat" library as used by Python and Apache doesn't cause a problem. If however the two versions of the "expat" library were incompatible, one would expect to see the "python" program crash with a segmentation fault at this point. This therefore can be used as an alternate way of verifying that it is the "pyexpat" module and more specifically the version of the "expat" library used, that is causing the problem. Updating System Expat Version ----------------------------- Because the version of the "expat" library embedded within the "pyexpat" module is shipped as source code within the Python distribution, it can be hard to replace it. The preferred approach to resolving the mismatch is therefore to replace/update the version of the "expat" library that is used by Apache. Generally the problem occurs where that used by Apache is older than that which is being used by Python. In that case, the version of the "expat" library used by Apache should be updated to be the same version as that embedded within the "pyexpat" module. By using the same version, one would expect any problems to disappear. If problems still persist, it is possible that Apache may also need to be recompiled against the same version of the "expat" library as used in Python. mod_wsgi-5.0.0/docs/user-guides/issues-with-pickle-module.rst000066400000000000000000000154661452636074700243100ustar00rootroot00000000000000========================= Issues With Pickle Module ========================= This article describes various limitations on what data can be stored using the "pickle" module from a WSGI application script file. This arises due to the fact that a WSGI application script file is not treated exactly the same as a standard Python module. Note that these limitations only apply to the WSGI application script file which is the target of the WSGIScriptAlias, AddHandler or Action directives. Any standard Python modules or packages which make up an application and which are being imported from directories located in ``sys.path`` using the 'import' statement are not affected. Packing And Script Reloading ---------------------------- The first source of problems and limitations is how the operation of the "pickle" serialisation routine is affected by the ability of mod_wsgi to automatically reload WSGI application script files. The particular types of data which are known to be affected are function objects and class objects. To illustrate the problems and where they arise, consider the following output from an interactive Python session:: >>> import pickle >>> def a(): pass ... >>> pickle.dumps(a) 'c__main__\na\np0\n.' >>> z = a >>> pickle.dumps(z) 'c__main__\na\np0\n.' As can be seen, it is possible to pickle a function object. 
This can be done even through a copy of the function object by reference, although in that case the pickled object still refers to the original function object. If now the original function object is deleted however, and the copy of the function object is pickled, a failure will occur:: >>> del a >>> pickle.dumps(z) Traceback (most recent call last): ... pickle.PicklingError: Can't pickle : it's not found as __main__.a The exception has been raised because the original function object was deleted from where it was created. It occurs because the copy of the original function object is still internally identified by the name which it was assigned at the point of creation. The "pickle" serialisation routine will check that the original object as identified by the name still exists. If it doesn't exist, it will refuse to serialise the object. Creating a new function object in place of the original function object does not eliminate the problem, although it does result in a different sort of exception:: >>> def a(): pass ... >>> pickle.dumps(z) Traceback (most recent call last): ... pickle.PicklingError: Can't pickle : it's not the same object as __main__.a In this case, the "pickle" serialisation routine recognises that "a" exists but realises that it is actually a different function object from which the "z" copy was originally made. Where the problems start occuring with mod_wsgi is if the function object being saved was itself a copy of some function object which is held outside of the module the function object was defined in. If the module holding the original function object was actually the WSGI application script file and it was reloaded because of the automatic script reloading mechanism, an attempt to pickle the object will fail. This is because the original function object which had been copied from will have been replaced by a new one when the script was reloaded. This sort of problem, although it will not occur for an instance of a class, will occur for the class object itself:: >>> class B: pass ... >>> b=B() >>> pickle.dumps(b) '(i__main__\nB\np0\n(dp1\nb.' >>> del B >>> pickle.dumps(b) '(i__main__\nB\np0\n(dp1\nb.' >>> class B: pass ... >>> pickle.dumps(B) 'c__main__\nB\np0\n.' >>> C = B >>> pickle.dumps(C) 'c__main__\nB\np0\n.' >>> del B >>> pickle.dumps(C) Traceback (most recent call last): ... pickle.PicklingError: Can't pickle : it's not found as __main__.B Note though that for the case of a class instance, an appropriate class object must exist at the same location when the serialised object is being restored:: >>> class B: pass ... >>> b = B() >>> pickle.loads(pickle.dumps(b)) <__main__.B instance at 0x41e40> >>> del B >>> pickle.loads(pickle.dumps(b)) Traceback (most recent call last): ... AttributeError: 'module' object has no attribute 'B' Unpacking And Module Names -------------------------- The second problem derives from how the mod_wsgi script loading mechanism does not make use of the standard Python module importing mechanism. This is necessary as the standard Python module importing mechanism requires every loaded module to have a unique name, with each module residing in ``sys.modules`` under that name. Further, that name must be able to be used to import the module. The mod_wsgi script loading mechanism does not place modules in ``sys.modules`` under their original name so as to allow multiple modules with the same name in different directories and also to avoid having to use the ".py" extension for script files. 
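The name actually used can be seen by logging the '__name__' attribute from within a WSGI script file. The exact form of the name is an implementation detail of mod_wsgi, but it will not simply be the name of the script file::

    import sys

    def application(environ, start_response):
        # Report the module name mod_wsgi assigned to this script file
        # and confirm that name is what appears in 'sys.modules'.
        environ['wsgi.errors'].write('__name__ = %r\n' % __name__)
        environ['wsgi.errors'].write('registered = %r\n' % (__name__ in sys.modules))

        output = b'Hello World!'
        response_headers = [('Content-type', 'text/plain'),
                            ('Content-Length', str(len(output)))]
        start_response('200 OK', response_headers)
        return [output]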
The consequence though of modules not residing in ``sys.modules`` under their original name is that function objects and class objects within such a module may not be able to converted back into objects from their serialised form. This is because "pickle" when attempting to import a module automatically if the module isn't already loaded will not be able to load the WSGI application script file. The problem can be seen in the following output from an interactive Python session:: >>> exec "class C: pass" in m.__dict__ >>> c = m.C() >>> pickle.dumps(c) '(im\nC\np0\n(dp1\nb.' >>> pickle.loads(pickle.dumps(c)) >>> del sys.modules["m"] >>> pickle.loads(pickle.dumps(c)) Traceback (most recent call last): ... ImportError: No module named m Summary Of Limitations ---------------------- Although the first problem described above could be avoided by disabling script reloading, there is no way to work around the second problem resulting from how mod_wsgi names modules when stored in ``sys.modules``. In practice, what this means is that neither function objects, class objects or instances of classes which are defined in a WSGI application script file should be stored using the "pickle" module. In order to ensure that no strange problems at all are likely to occur, it is suggested that only basic builtin Python types, ie., scalars, tuples, lists and dictionaries, be stored using the "pickle" module from a WSGI application script file. That is, avoid any type of object which has user defined code associated with it. Note that this limitation only applies to the WSGI application script file, it doesn't apply to normal Python modules imported using the Python "import" statement. mod_wsgi-5.0.0/docs/user-guides/processes-and-threading.rst000066400000000000000000000532351452636074700240010ustar00rootroot00000000000000======================= Processes And Threading ======================= Apache can operate in a number of different modes dependent on the platform being used and the way in which it is configured. This ranges from multiple processes being used, with only one request being handled at a time within each process, to one or more processes being used, with concurrent requests being handled in distinct threads executing within those processes. The combinations possible are further increased by mod_wsgi through its ability to create groups of daemon processes to which WSGI applications can be delegated. As with Apache itself, each process group can consist of one or more processes and optionally make use of multithreading. Unlike Apache, where some combinations are only possible based on how Apache was compiled, the mod_wsgi daemon processes can operate in any mode based only on runtime configuration settings. This article provides background information on how Apache and mod_wsgi makes use of processes and threads to handle requests, and how Python sub interpreters are used to isolate WSGI applications. The implications of the various modes of operation on data sharing is also discussed. WSGI Process/Thread Flags ------------------------- Although Apache can make use of a combination of processes and/or threads to handle requests, this is not unique to the Apache web server and the WSGI specification acknowledges this fact. This acknowledgement is in the form of specific key/value pairs which must be supplied as part of the WSGI environment to a WSGI application. 
The purpose of these key/value pairs is to indicate whether the underlying web server does or does not make use of multiple processes and/or multiple threads to handle requests. These key/value pairs are defined as follows in the WSGI specification. *wsgi.multithread* This value should evaluate true if the application object may be simultaneously invoked by another thread in the same process, and should evaluate false otherwise. *wsgi.multiprocess* This value should evaluate true if an equivalent application object may be simultaneously invoked by another process, and should evaluate false otherwise. A WSGI application which is not written to take into consideration the different combinations of process and threading models may not be portable and potentially may not be robust when deployed to an alternate hosting platform or configuration. Although you may not need an application or application component to work under all possible combinations for these values initially, it is highly recommended that any application component still be designed to work under any of the different operating modes. If for some reason this cannot be done due to the very nature of what functionality the component provides, the component should validate if it is being run within a compatible configuration and return a HTTP 500 internal server error response if it isn't. An example of a component for which restrictions would apply is one providing an interactive browser based debugger session in response to an internal failure of a WSGI application. In this scenario, for the component to work correctly, subsequent HTTP requests must be processed by the same process. As such, the component can only be used with a web server that uses a single process. In other words, the value of 'wsgi.multiprocess' would have to evaluate to be false. Multi-Processing Modules ------------------------ The main factor which determines how Apache operates is which multi-processing module (MPM) is built into Apache at compile time. Although runtime configuration can customise the behaviour of the MPM, the choice of MPM will dictate whether or not multithreading is available. On UNIX based systems, Apache defaults to being built with the 'prefork' MPM. If Apache 1.3 is being used this is actually the only choice, but for later versions of Apache, this can be overridden at build time by supplying an appropriate value in conjunction with the ``--with-mpm`` option when running the 'configure' script for Apache. The main alternative to the 'prefork' MPM which can be used on UNIX systems is the 'worker' MPM. If you are unsure which MPM is built into Apache, it can be determined by running the Apache web server executable with the ``-V`` option. The output from running the web server executable with this option will be information about how it was configured when built:: Server version: Apache/2.2.1 Server built: Mar 4 2007 20:48:15 Server's Module Magic Number: 20051115:1 Server loaded: APR 1.2.6, APR-Util 1.2.6 Compiled using: APR 1.2.6, APR-Util 1.2.6 Architecture: 32-bit Server MPM: Worker threaded: yes (fixed thread count) forked: yes (variable process count) Server compiled with.... 
-D APACHE_MPM_DIR="server/mpm/worker" -D APR_HAS_MMAP -D APR_HAVE_IPV6 (IPv4-mapped addresses enabled) -D APR_USE_SYSVSEM_SERIALIZE -D APR_USE_PTHREAD_SERIALIZE -D SINGLE_LISTEN_UNSERIALIZED_ACCEPT -D APR_HAS_OTHER_CHILD -D AP_HAVE_RELIABLE_PIPED_LOGS -D DYNAMIC_MODULE_LIMIT=128 -D HTTPD_ROOT="/usr/local/apache-2.2" -D SUEXEC_BIN="/usr/local/apache-2.2/bin/suexec" -D DEFAULT_SCOREBOARD="logs/apache_runtime_status" -D DEFAULT_ERRORLOG="logs/error_log" -D AP_TYPES_CONFIG_FILE="conf/mime.types" -D SERVER_CONFIG_FILE="conf/httpd.conf" Which MPM is being used can be determined from the 'Server MPM' field. On the Windows platform the only available MPM is 'winnt'. The UNIX 'prefork' MPM ---------------------- This MPM is the most commonly used. It was the only mode of operation available in Apache 1.3 and is still the default mode on UNIX systems in later versions of Apache. In this configuration, the main Apache process will at startup create multiple child processes. When a request is received by the parent process, it will be processed by which ever of the child processes is ready. Each child process will only handle one request at a time. If another request arrives at the same time, it will be handled by the next available child process. When it is detected that the number of available processes is running out, additional child processes will be created as necessary. If a limit is specified as to the number of child processes which may be created and the limit is reached, plus there are sufficient requests arriving to fill up the listener socket queue, the client may instead receive an error resulting from not being able to establish a connection with the web server. Where additional child processes have to be created due to a peak in the number of current requests arriving and where the number of requests has subsequently dropped off, the excess child processes may be shutdown and killed off. Child processes may also be shutdown and killed off after they have handled some set number of requests. Although threads are not used to service individual requests, this does not preclude an application from creating separate threads to perform some specific task. For the typical 'prefork' configuration where multiple processes are used, the WSGI environment key/value pairs indicating how processes and threads are being used will be as follows. *wsgi.multithread* False *wsgi.multiprocess* True Because multiple processes are being used, a WSGI middleware component such as the interactive browser based debugger described would not be able to be used. If during development and testing of a WSGI application, use of such a debugger was required, the only option which would exist would be to limit the number of processes being used. This could be achieved using the Apache configuration:: StartServers 1 ServerLimit 1 With this configuration, only one process will be started, with no additional processes ever being created. The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* False *wsgi.multiprocess* False In effect, this configuration has the result of serialising all requests through a single process. This will allow an interactive browser based debugger to be used, but may prevent more complex WSGI applications which make use of AJAX techniques from working. 
This could occur where a web page initiates a sequence of AJAX requests
and expects later requests to be able to complete while a response for an
initial request is still pending. In other words, problems may occur
where requests overlap, as subsequent requests will not be able to be
executed until the initial request has completed.

The UNIX 'worker' MPM
---------------------

The 'worker' MPM is similar to 'prefork' mode except that within each
child process there will exist a number of worker threads. Instead of a
request only being able to be processed by the next available idle child
process, with the handling of the request being the only thing the child
process is then doing, the request may be processed by a worker thread
within a child process which already has other worker threads handling
other requests at the same time.

It is possible that a WSGI application could be executed at the same time
from multiple worker threads within the one child process. This means
that multiple worker threads may want to access common shared data at the
same time. As a consequence, such common shared data must be protected in
a way that will allow access and modification in a thread safe manner.
Normally this would necessitate the use of some form of synchronisation
mechanism to ensure that only one thread at a time accesses and/or
modifies the common shared data.

If all worker threads within a child process were busy when a new request
arrives, the request would be processed by an idle worker thread in
another child process. Apache may still create new child processes on
demand if necessary. Apache may also still shutdown and kill off excess
child processes, or child processes that have handled more than a set
number of requests.

Overall, use of the 'worker' MPM will result in fewer child processes
needing to be created, but resource usage of individual child processes
will be greater. On modern computer systems, the 'worker' MPM would in
general be the preferred MPM to use and should if possible be used in
preference to the 'prefork' MPM.

Although contention for the global interpreter lock (GIL) in Python can
cause issues for pure Python programs, it is not generally as big an
issue when using Python within Apache. This is because all the underlying
infrastructure for accepting requests and mapping the URL to a WSGI
application, as well as the handling of requests against static files,
are all performed by Apache in C code. While this code is being executed
the thread will not be holding the Python GIL, thus allowing a greater
level of overlapping execution where a system has multiple CPUs or CPUs
with multiple cores.

This ability to make good use of more than one processor, even when using
multithreading, is further enhanced by the fact that Apache uses multiple
processes for handling requests and not just a single process. Thus, even
when there is some contention for the GIL within a specific process, it
doesn't stop other processes from being able to run, as the GIL is only
local to a process and does not extend across processes.

For the typical 'worker' configuration where multiple processes and
multiple threads are used, the WSGI environment key/value pairs
indicating how processes and threads are being used will be as follows.
*wsgi.multithread* True *wsgi.multiprocess* True Similar to the 'prefork' MPM, the number of processes can be restricted to just one if required using the configuration:: StartServers 1 ServerLimit 1 With this configuration, only one process will be started, with no additional processes ever being created, but that one process would still make use of multiple threads. The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* True *wsgi.multiprocess* False Because multiple threads are being used, there would be no problem with overlapping requests generated by an AJAX based web page. The Windows 'winnt' MPM ----------------------- On the Windows platform the 'winnt' MPM is the only option available. With this MPM, multiple worker threads within a child process are used to handle all requests. The 'winnt' MPM is different to the 'worker' mode however in that there is only one child process. At no time are additional child processes created, or that one child process shutdown and killed off, except where Apache as a whole is being stopped or restarted. Because there is only one child process, the maximum number of threads used is much greater. The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* True *wsgi.multiprocess* False The mod_wsgi Daemon Processes ----------------------------- When using 'daemon' mode of mod_wsgi, each process group can be individually configured so as to run in a manner similar to either 'prefork', 'worker' or 'winnt' MPMs for Apache. This is achieved by controlling the number of processes and threads within each process using the 'processes' and 'threads' options of the WSGIDaemonProcess directive. To emulate the same process/thread model as the 'winnt' MPM, that is, a single process with multiple threads, the following configuration would be used:: WSGIDaemonProcess example threads=25 The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* True *wsgi.multiprocess* False Note that by not specifying the 'processes' option only a single process is created within the process group. Although providing 'processes=1' as an option would also result in a single process being created, this has a slightly different meaning and so you should only do this if necessary. The difference between not specifying the 'processes' option and defining 'processes=1' will be that WSGI environment attribute called 'wsgi.multiprocess' will be set to be True when the 'processes' option is defined, whereas not providing the option at all will result in the attribute being set to be False. This distinction is to allow for where some form of mapping mechanism might be used to distribute requests across multiple process groups and thus in effect it is still a multiprocess application. In other words, if you use the configuration:: WSGIDaemonProcess example processes=1 threads=25 the WSGI environment key/value pairs indicating how processes and threads are being used will instead be: *wsgi.multithread* True *wsgi.multiprocess* True If you need to ensure that 'wsgi.multiprocess' is False so that interactive debuggers do not complain about an incompatible configuration, simply do not specify the 'processes' option and allow the default behaviour of a single daemon process to apply. 
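As suggested earlier, a component which depends on a particular process
model can validate the configuration itself using these flags rather than
fail in obscure ways. A minimal sketch of such a check, implemented here
as a WSGI middleware wrapper, might look like the following; the class
name is arbitrary::

    class RequireSingleProcess:

        """Refuse to handle requests unless the hosting mechanism uses
        only a single process, as would be required by components such
        as an interactive browser based debugger."""

        def __init__(self, application):
            self.__application = application

        def __call__(self, environ, start_response):
            if environ.get('wsgi.multiprocess', True):
                status = '500 Internal Server Error'
                output = b'A single process configuration is required.'

                response_headers = [('Content-Type', 'text/plain'),
                                    ('Content-Length', str(len(output)))]

                start_response(status, response_headers)
                return [output]

            return self.__application(environ, start_response)

The wrapper would be applied around the WSGI application entry point in
the normal way before it is exposed to mod_wsgi.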
To emulate the same process/thread model as the 'worker' MPM, that is, multiple processes with multiple threads, the following configuration would be used:: WSGIDaemonProcess example processes=2 threads=25 The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* True *wsgi.multiprocess* True To emulate the same process/thread model as the 'prefork' MPM, that is, multiple processes with only a single thread running in each, the following configuration would be used:: WSGIDaemonProcess example processes=5 threads=1 The WSGI environment key/value pairs indicating how processes and threads are being used will for this configuration be as follows. *wsgi.multithread* False *wsgi.multiprocess* True Note that when using mod_wsgi daemon processes, the processes are only used to execute the Python based WSGI application. The processes are not in any way used to serve static files, or host applications implemented in other languages. Unlike the normal Apache child processes when 'embedded' mode of mod_wsgi is used, the configuration as to the number of daemon processes within a process group is fixed. That is, when the server experiences additional load, no more daemon processes are created than what is defined. You should therefore always plan ahead and make sure the number of processes and threads defined is adequate to cope with the expected load. Sharing Of Global Data ---------------------- When the 'winnt' MPM is being used, or the 'prefork' or 'worker' MPM are forced to run with only a single process, all request handlers within a specific WSGI application will always be accessing the same global data. This global data will persist in memory until Apache is shutdown or restarted, or in the case of the 'prefork' or 'worker' MPM until the child process is recycled due to reaching a predefined request limit. This ability to access the same global data and for that data to persist for the lifetime of the child process is not present when either of the 'prefork' or 'worker' MPM are used in multiprocess mode. In other words, where the WSGI environment key/value pair indicating how processes are used is set to: *wsgi.multiprocess* True This is because request handlers can execute within the context of distinct child processes, each with their own set of global data unique to that child process. The consequences of this are that you cannot assume that separate invocations of a request handler will have access to the same global data if that data only resides within the memory of the child process. If some set of global data must be accessible by all invocations of a handler, that data will need to be stored in a way that it can be accessed from multiple child processes. Such sharing could be achieved by storing the global data within an external database, the filesystem or in shared memory accessible by all child processes. Since the global data will be accessible from multiple child processes at the same time, there must be adequate locking mechanisms in place to prevent distinct child processes from trying to modify the same data at the same time. The locking mechanisms need to also be able to deal with the case of multiple threads within one child process accessing the global data at the same time, as will be the case for the 'worker' and 'winnt' MPM. Python Sub Interpreters ----------------------- The default behaviour of mod_wsgi is to create a distinct Python sub interpreter for each WSGI application. 
Thus, where Apache is being used to host multiple WSGI applications a process will contain multiple sub interpreters. When Apache is run in a mode whereby there are multiple child processes, each child process will contain sub interpreters for each WSGI application. When a sub interpreter is created for a WSGI application, it would then normally persist for the life of the process. The only exception to this would be where interpreter reloading is enabled, in which case the sub interpreter would be destroyed and recreated when the WSGI application script file has been changed. For the sub interpreter created for each WSGI application, they will each have their own set of Python modules. In other words, a change to the global data within the context of one sub interpreter will not be seen from the sub interpreter corresponding to a different WSGI application. This will be the case whether or not the sub interpreters are in the same process. This behaviour can be modified and multiple applications grouped together using the WSGIApplicationGroup directive. Specifically, the directive indicates that the marked WSGI applications should be run within the context of a common sub interpreter rather than being run in their own sub interpreters. By doing this, each WSGI application will then have access to the same global data. Do note though that this doesn't change the fact that global data will not be shared between processes. The only other way of sharing data between sub interpreters within the one child process would be to use an external data store, or a third party C extension module for Python which allows communication or sharing of data between multiple interpreters within the same process. Building A Portable Application ------------------------------- Taking into consideration the different process models used by Apache and the manner in which interpreters are used by mod_wsgi, to build a portable and robust application requires the following therefore be satisified. 1. Where shared data needs to be visible to all application instances, regardless of which child process they execute in, and changes made to the data by one application are immediately available to another, including any executing in another child process, an external data store such as a database or shared memory must be used. Global variables in normal Python modules cannot be used for this purpose. 2. Access to and modification of shared data in an external data store must be protected so as to prevent multiple threads in the same or different processes from interfering with each other. This would normally be achieved through a locking mechanism visible to all child processes. 3. An application must be re-entrant, or simply put, be able to be called concurrently by multiple threads at the same time. Data which needs to exist for the life of the request, would need to be stored as stack based data, thread local data, or cached in the WSGI application environment. Global variables within the actual application module cannot be used for this purpose. 4. Where global data in a module local to a child process is still used, for example as a cache, access to and modification of the global data must be protected by local thread locking mechanisms. 
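As an illustration of the last point, a minimal sketch of a per process
cache protected by a thread lock is shown below. The 'lookup()' helper
and the idea of passing in a 'compute' callable are purely illustrative::

    import threading

    _cache = {}
    _cache_lock = threading.Lock()

    def lookup(key, compute):
        # The cache is local to the current process. Each Apache child
        # process or mod_wsgi daemon process will hold its own copy, so
        # only use this for data which can safely be derived separately
        # in every process.
        with _cache_lock:
            if key in _cache:
                return _cache[key]

        value = compute(key)

        with _cache_lock:
            return _cache.setdefault(key, value)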
mod_wsgi-5.0.0/docs/user-guides/quick-configuration-guide.rst000066400000000000000000000325441452636074700243440ustar00rootroot00000000000000========================= Quick Configuration Guide ========================= This document describes the steps for configuring mod_wsgi for a basic WSGI application. If you are setting up mod_wsgi for the very first time, it is highly recommended that you follow the examples in this document. Make sure that you at least get the examples running to verify that mod_wsgi is working correctly before attempting to install any WSGI applications of your own. WSGI Application Script File ---------------------------- WSGI is a specification of a generic API for mapping between an underlying web server and a Python web application. WSGI itself is described by Python PEP 3333: * http://www.python.org/dev/peps/pep-3333/ The purpose of the WSGI specification is to provide a common mechanism for hosting a Python web application on a range of different web servers supporting the Python programming language. A very simple WSGI application, and the one which should be used for the examples in this document, is as follows:: def application(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] This sample application will need to be placed into what will be referred to as the WSGI application script file. For the examples presented here, the WSGI application will be run as the user that Apache runs as. As such, the user that Apache runs as must have read access to both the WSGI application script file and all the parent directories that contain it. Note that mod_wsgi requires that the WSGI application entry point be called 'application'. If you want to call it something else then you would need to configure mod_wsgi explicitly to use the other name. Thus, don't go arbitrarily changing the name of the function. If you do, even if you set up everything else correctly the application will not be found. Mounting The WSGI Application ----------------------------- There are a number of ways that a WSGI application hosted by mod_wsgi can be mounted against a specific URL. These methods are similar to how one would configure traditional CGI applications. The main approach entails explicitly declaring in the main Apache configuration file the URL mount point and a reference to the WSGI application script file. In this case the mapping is fixed, with changes only being able to be made by modifying the main Apache configuration and restarting Apache. When using mod_cgi to host CGI applications, this would be done using the ScriptAlias directive. For mod_wsgi, the directive is instead called WSGIScriptAlias:: WSGIScriptAlias /myapp /usr/local/www/wsgi-scripts/myapp.wsgi This directive can only appear in the main Apache configuration files. The directive can be used at server scope but would normally be placed within the VirtualHost container for a particular site. It cannot be used within either of the Location, Directory or Files container directives, nor can it be used within a ".htaccess" file. The first argument to the WSGIScriptAlias directive should be the URL mount point for the WSGI application. For this case the URL should not contain a trailing slash. The only exception to this is if the WSGI application is to be mounted at the root of the web server, in which case '/' would be used. 
The second argument to the WSGIScriptAlias directive should be an
absolute pathname to the WSGI application script file. It is into this
file that the sample WSGI application code should be placed.

Note that an absolute pathname must be used for the WSGI application
script file supplied as the second argument. It is not possible to
specify an application by Python module name alone. A full path is used
for a number of reasons, the main one being so that all the Apache access
controls can still be applied to indicate who can actually access the
WSGI application.

Because the Apache access controls will apply, if the WSGI application is
located outside of any directories already configured to be accessible to
Apache, it will be necessary to tell Apache that files within that
directory can be used. To do this the Directory directive must be used::

    <Directory /usr/local/www/wsgi-scripts>
    <IfVersion < 2.4>
        Order allow,deny
        Allow from all
    </IfVersion>
    <IfVersion >= 2.4>
        Require all granted
    </IfVersion>
    </Directory>

Note that it is highly recommended that the WSGI application script file
in this case NOT be placed within the existing DocumentRoot for your main
Apache installation, or the particular site you are setting it up for.
This is because if that directory is otherwise being used as a source of
static files, the source code for your application might be able to be
downloaded.

You also should not use the home directory of a user account, as to do
that would mean allowing Apache to serve up any files in that account. In
this case any misconfiguration of Apache could end up exposing your whole
account for downloading.

It is thus recommended that a special directory be setup distinct from
other directories and that the only thing in that directory be the WSGI
application script file, and if necessary any support files it requires.

A complete virtual host configuration for this type of setup would
therefore be something like::

    <VirtualHost *:80>

        ServerName www.example.com
        ServerAlias example.com
        ServerAdmin webmaster@example.com

        DocumentRoot /usr/local/www/documents

        <Directory /usr/local/www/documents>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

        WSGIScriptAlias /myapp /usr/local/www/wsgi-scripts/myapp.wsgi

        <Directory /usr/local/www/wsgi-scripts>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

    </VirtualHost>

After appropriate changes have been made Apache will need to be
restarted. For this example, the URL 'http://www.example.com/myapp' would
then be used to access the WSGI application.

Note that you obviously should substitute the paths and hostname with
values appropriate for your system.

Mounting At Root Of Site
------------------------

If instead you want to mount a WSGI application at the root of a site,
simply list '/' as the mount point when configuring the WSGIScriptAlias
directive::

    WSGIScriptAlias / /usr/local/www/wsgi-scripts/myapp.wsgi

Do note however that doing so will mean that any static files contained
in the DocumentRoot will be hidden and requests against URLs pertaining
to the static files will instead be processed by the WSGI application.
In this situation it becomes necessary to remap, using the Alias
directive, any URLs for static files to the directory containing them::

    Alias /robots.txt /usr/local/www/documents/robots.txt
    Alias /favicon.ico /usr/local/www/documents/favicon.ico

    Alias /media/ /usr/local/www/documents/media/

A complete virtual host configuration for this type of setup would
therefore be something like::

    <VirtualHost *:80>

        ServerName www.example.com
        ServerAlias example.com
        ServerAdmin webmaster@example.com

        DocumentRoot /usr/local/www/documents

        Alias /robots.txt /usr/local/www/documents/robots.txt
        Alias /favicon.ico /usr/local/www/documents/favicon.ico

        Alias /media/ /usr/local/www/documents/media/

        <Directory /usr/local/www/documents>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

        WSGIScriptAlias / /usr/local/www/wsgi-scripts/myapp.wsgi

        <Directory /usr/local/www/wsgi-scripts>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

    </VirtualHost>

After appropriate changes have been made Apache will need to be
restarted. For this example, the URL 'http://www.example.com/' would then
be used to access the WSGI application.

Note that you obviously should substitute the paths and hostname with
values appropriate for your system.

Delegation To Daemon Process
----------------------------

By default any WSGI application will run in what is called embedded mode.
That is, the application will be hosted within the Apache worker
processes used to handle normal static file requests.

When embedded mode is used, whenever you make changes to your WSGI
application code you would generally have to restart the whole Apache web
server in order for changes to be picked up. This can be inconvenient,
especially if the web server is a shared resource hosting other web
applications at the same time, or you don't have root access to be able
to restart the server and rely on someone else to restart it.

On UNIX systems when running Apache 2.X, an option which exists with
mod_wsgi and that avoids the need to restart the whole Apache web server
when code changes are made, is to use what is called daemon mode. In
daemon mode a set of processes is created for hosting a WSGI application,
with any requests for that WSGI application automatically being routed to
those processes for handling.

When code changes are made and it is desired that the daemon processes
for the WSGI application be restarted, all that is required is to mark
the WSGI application script file as modified by using the 'touch'
command.

To make use of daemon mode for WSGI applications hosted within a specific
site, the WSGIDaemonProcess and WSGIProcessGroup directives would need to
be defined. For example, to setup a daemon process group containing two
multithreaded processes one could use::

    WSGIDaemonProcess example.com processes=2 threads=15

    WSGIProcessGroup example.com

A complete virtual host configuration for this type of setup would
therefore be something like::

    <VirtualHost *:80>

        ServerName www.example.com
        ServerAlias example.com
        ServerAdmin webmaster@example.com

        DocumentRoot /usr/local/www/documents

        Alias /robots.txt /usr/local/www/documents/robots.txt
        Alias /favicon.ico /usr/local/www/documents/favicon.ico

        Alias /media/ /usr/local/www/documents/media/

        <Directory /usr/local/www/documents>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

        WSGIDaemonProcess example.com processes=2 threads=15 display-name=%{GROUP}

        WSGIProcessGroup example.com

        WSGIScriptAlias / /usr/local/www/wsgi-scripts/myapp.wsgi

        <Directory /usr/local/www/wsgi-scripts>
        <IfVersion < 2.4>
            Order allow,deny
            Allow from all
        </IfVersion>
        <IfVersion >= 2.4>
            Require all granted
        </IfVersion>
        </Directory>

    </VirtualHost>

After appropriate changes have been made Apache will need to be
restarted.
For this example, the URL 'http://www.example.com/' would then be used to access the the WSGI application. Note that you obviously should substitute the paths and hostname with values appropriate for your system. As mentioned previously, the daemon processes will be shutdown and restarted automatically if the WSGI application script file is modified. For the sample application presented in this document the whole application is in that file. For more complicated applications the WSGI application script file will be merely an entry point to an application being imported from other Python modules or packages. In this later case, although no change may be required to the WSGI application script file itself, it can still be touched to trigger restarting of the daemon processes in the event that any code in the separate modules or packages is changed. Note that only requests for the WSGI application are handled within the context of the daemon processes. Any requests for static files are still handled within the Apache worker processes. Debugging Any Problems ---------------------- To debug any problems one should take note of the type of error response being returned, but more importantly one should look at the Apache error logs for more detailed descriptions of a specific problem. Being new to mod_wsgi it is highly recommended that the default Apache LogLevel be increased from 'warn' to 'info':: LogLevel info When this is done mod_wsgi will output additional information regarding when daemon processes are created, when Python sub interpreters related to a group of WSGI applications are created and when WSGI application script files are loaded and/or reloaded. This information can be quite valuable in determining what problem may be occuring. Note that where the LogLevel directive may have been defined both in and outside of a VirtualHost directive, due to the VirtualHost declaring its own error logs, both instances of the LogLevel directive should be changed. This is because although the virtual host may have its own error log, some information is still logged to the main Apache error log and the LogLevel directive outside of the virtual host context needs to be changed for that additional information to be recorded. In other words, even if the VirtualHost has its own error log file, also look in the main Apache error log file for information as well. mod_wsgi-5.0.0/docs/user-guides/quick-installation-guide.rst000066400000000000000000000202121452636074700241630ustar00rootroot00000000000000======================== Quick Installation Guide ======================== This document describes the steps for installing mod_wsgi on a UNIX system from the original source code. Apache Requirements ------------------- Apache 2.0, 2.2 or 2.4 can be used. For Apache 2.0, 2.2 and 2.4, the single threaded 'prefork' or multithreaded 'worker' Apache MPMs can be used. For Apache 2.4 the 'event' MPM can also be used. The version of Apache and its runtime libraries must have been compiled with support for threading. On Linux systems, if Apache has been installed from a package repository, you must have installed the corresponding Apache "dev" package as well. For most Linux distributions, the "dev" package for Apache 2.X is "apache2-dev" where the corresponding Apache package was "apache2". Some systems however distinguish the "dev" package based on which MPM is used by Apache. As such, it may also be called "apache2-worker-dev" or "apache2-prefork-dev". 
If using Apache 2.X, do not mix things up and install "apache-dev" by mistake, which is the "dev" package for Apache 1.3 called just "apache". Python Requirements ------------------- Any Python 2.X version from Python 2.6 onwards can be used. For Python 3.X, you will need Python 3.3 or later. The version of Python being used must have been compiled with support for threading. On Linux systems, if Python has been installed from a package repository, you must have installed the corresponding Python "dev" package as well. Python should preferably be available as a shared library. If this is not the case then base runtime memory usage of mod_wsgi will be greater. Unpacking The Source Code ------------------------- Source code tar balls can be obtained from: * https://github.com/GrahamDumpleton/mod_wsgi/releases After having downloaded the tar ball for the version you want to use, unpack it with the command:: tar xvfz mod_wsgi-X.Y.tar.gz Replace 'X.Y' with the actual version number for that being used. Configuring The Source Code --------------------------- To setup the package ready for building run the "configure" script from within the source code directory:: ./configure The configure script will attempt to identify the Apache installation to use by searching in various standard locations for the Apache build tools included with your distribution called "apxs2" or "apxs". If not found in any of these standard locations, your PATH will be searched. Which Python installation to use will be determined by looking for the "python" executable in your PATH. If these programs are not in a standard location, they cannot be found in your PATH, or you wish to use alternate versions to those found, the ``--with-apxs`` and ``--with-python`` options can be used in conjunction with the "configure" script:: ./configure --with-apxs=/usr/local/apache/bin/apxs \ --with-python=/usr/local/bin/python On some Linux distributions, such as SUSE and CentOS, it will be necessary to use the ``--with-apxs`` option and specify either "/usr/sbin/apxs2-worker" or "/usr/sbin/apxs2-prefork". This is necessary as the Linux distribtions allow installation of "dev" packages for both Apache MPM variants at the same time, whereas other Linux distributions do not. If you have multiple versions of Python installed and you are not using that which is the default, you may have to organise the PATH inherited by the Apache application, which when run will result in Apache finding the alternate version. Alternatively, the WSGIPythonHome directive should be used to specify the exact location of the Python installation corresponding to the version of Python compiled against. If this is not done, the version of Python running within Apache may attempt to use the Python modules from the wrong version of Python. Building The Source Code ------------------------ Once the package has been configured, it can be built by running:: make If the mod_wsgi source code does not build successfully, see: * :doc:`../user-guides/installation-issues` If successful, the only product of the build process that needs to be installed is the Apache module itself. There are no separate Python code files as everything is done within C code compiled into the Apache module. To install the Apache module into the standard location for Apache modules as dictated by Apache for your installation, run:: make install Installation should be done as the 'root' user or 'sudo' command if appropriate. 
If you want to install the Apache module in a non standard location dictated by how your operating system distribution structures the configuration files and modules for Apache, you will need to copy the file manually into place. If installing the Apache module by hand, the file is called 'mod_wsgi.so'. The compiled Apache module can be found in the ".libs" subdirectory. The name of the file should be kept the same when copied into its appropriate location. Loading Module Into Apache -------------------------- Once the Apache module has been installed into your Apache installation's module directory, it is still necessary to configure Apache to actually load the module. Exactly how this is done and in which of the main Apache configuration files it should be placed, is dependent on which version of Apache you are using and may also be influenced by how your operating system's Apache distribution has organised the Apache configuration files. You may therefore need to check with any documentation for your operating system to see in what way the procedure may need to be modified. In the simplest case, all that is required is to add a line of the form:: LoadModule wsgi_module modules/mod_wsgi.so into the main Apache "httpd.conf" configuration file at the same point that other Apache modules are being loaded. The last option to the directive should either be an absolute path to where the mod_wsgi module file is located, or a path expressed relative to the root of your Apache installation. If you used "make" to install the package, see where it copied the file to work out what to set this value to. Restart Apache Web Server ------------------------- Having added the required directives you should perform a restart of Apache to check everything is okay. If you are using an unmodified Apache distribution from the Apache Software Foundation, a restart is performed using the 'apachectl' command:: apachectl restart If you see any sort of problem, or if you are upgrading from an older version of mod_wsgi, it is recommended you actually stop and the start Apache instead:: apachectl stop apachectl start Note that on many Linux distributions where Apache is prepackaged, the Apache software has been modified and as a result the 'apachectl' command may not work properly or the command may not be present. On these systems, you will need to use whatever is the sanctioned method for restarting system services. This may be via an 'init.d' script:: /etc/init.d/httpd stop /etc/init.d/httpd start or via some special service maintenance script. On Debian derived distributions, restarting Apache is usually done via the 'invoke-rc.d' command:: invoke-rc.d apache2 stop invoke-rc.d apache2 start On RedHat derived distributions, restarting Apache is usually done via the 'service' command:: service httpd stop service httpd start In nearly all cases the scripts used to restart Apache will need to be run as the 'root' user or via 'sudo'. In general, for any system where you are using a prepackaged version of Apache, it is wise to always check the documentation for that package or system to determine the correct way to restart the Apache service. This is because they often use a wrapper around 'apachectl', or replace it, with a script which performs additional actions. If all is okay, you should see a line of the form:: Apache/2.4.8 (Unix) mod_wsgi/4.4.21 Python/2.7 configured in the Apache error log file. 
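With most Apache 2.2 and later installations you can also ask Apache to
list the modules it has actually loaded. If mod_wsgi was loaded
correctly, 'wsgi_module' should usually appear in the output of::

    apachectl -M | grep wsgi

If your system does not provide 'apachectl', the same '-M' option can
normally be passed directly to the 'httpd' or 'apache2' binary instead.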
Cleaning Up After Build ----------------------- To cleanup after installation, run:: make clean If you need to build the module for a different version of Apache, you should run:: make distclean and then rerun "configure" against the alternate version of Apache before attempting to run "make" again. mod_wsgi-5.0.0/docs/user-guides/registering-cleanup-code.rst000066400000000000000000000134641452636074700241470ustar00rootroot00000000000000======================== Registering Cleanup Code ======================== This document describes how to go about registering callbacks to perform cleanup tasks at the end of a request and when an application process is being shutdown. Cleanup At End Of Request ------------------------- To perform a cleanup task at the end of a request a couple of different approaches can be used dependent on the requirements. The first approach entails wrapping the calling of a WSGI application within a Python 'try' block, with the cleanup code being triggered from the 'finally' block:: def _application(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] def application(environ, start_response): try: return _application(environ, start_response) finally: # Perform required cleanup task. ... This might even be factored into a convenient WSGI middleware component:: class ExecuteOnCompletion1: def __init__(self, application, callback): self.__application = application self.__callback = callback def __call__(self, environ, start_response): try: return self.__application(environ, start_response) finally: self.__callback(environ) The WSGI environment passed in the 'environ' argument to the application could even be supplied to the cleanup callback as shown in case it needed to look at any configuration information or information passed back in the environment from the application. The application would then be replaced with an instance of this class initialised with a reference to the original application and a suitable cleanup function:: def cleanup(environ): # Perform required cleanup task. ... application = ExecuteOnCompletion1(_application, cleanup) Using this approach, the cleanup function will actually be called prior to the response content being consumed by mod_wsgi and written back to the client. As such, it is probably only suitable where a complete response is returned as an array of strings. It would not be suitable where a generator is being returned as the cleanup would be called prior to any strings being consumed from the generator. This would be problematic where the cleanup task was to close or delete some resource from which the generator was obtaining the response content. In order to have the cleanup task only executed after the complete response has been consumed, it would be necessary to wrap the result of the application within an instance of a purpose built generator like object. 
This object needs to yield each item from the response in turn, and when this object is cleaned up by virtue of the 'close()' method being called, it should in turn call 'close()' on the result returned from the application if necessary, and then call the supplied cleanup callback:: class Generator2: def __init__(self, iterable, callback, environ): self.__iterable = iterable self.__callback = callback self.__environ = environ def __iter__(self): for item in self.__iterable: yield item def close(self): try: if hasattr(self.__iterable, 'close'): self.__iterable.close() finally: self.__callback(self.__environ) class ExecuteOnCompletion2: def __init__(self, application, callback): self.__application = application self.__callback = callback def __call__(self, environ, start_response): try: result = self.__application(environ, start_response) except: self.__callback(environ) raise return Generator2(result, self.__callback, environ) Note that for a successfully completed request, since the cleanup task will be executed after the complete response has been written back to the client, if an error occurs there will be no evidence of this in the response seen by the client. As far as the client will be concerned everything will look okay. The only indication of an error will be found in the Apache error log. Both of the solutions above are not specific to mod_wsgi and should work with any WSGI hosting solution which complies with the WSGI specification. Cleanup On Process Shutdown --------------------------- To perform a cleanup task on shutdown of either an Apache child process when using 'embedded' mode of mod_wsgi, or of a daemon process when using 'daemon' mode of mod_wsgi, the standard Python 'atexit' module can be used:: import atexit def cleanup(): # Perform required cleanup task. ... atexit.register(cleanup) Such a registered cleanup function will also be called if the 'Interpreter' reload mechanism is enabled and the Python sub interpreter in which the cleanup function was registered was destroyed. Note that although mod_wsgi will ensure that cleanup functions registered using the 'atexit' module will be called correctly, this solution may not be portable to all WSGI hosting solutions. Also be aware that although one can register a cleanup function to be called on process shutdown, this is no absolute guarantee that it will be called. This is because a process may crash, or it may be forcibly killed off by Apache if it takes too long to shutdown normally. As a result, an application should not be dependent on cleanup functions being called on process shutdown and an application must have some means of detecting an abnormal shutdown when it is started up and recover from it automatically. mod_wsgi-5.0.0/docs/user-guides/reloading-source-code.rst000066400000000000000000000507031452636074700234370ustar00rootroot00000000000000===================== Reloading Source Code ===================== This document contains information about mechanisms available in mod_wsgi for automatic reloading of source code when an application is changed and any issues related to those mechanisms. Embedded Mode Vs Daemon Mode ---------------------------- What is achievable in the way of automatic source code reloading depends on which mode your WSGI application is running. If your WSGI application is running in embedded mode then what happens when you make code changes is largely dictated by how Apache works, as it controls the processes handling requests. 
In general, if using embedded mode you will have no choice but to manually restart Apache in order for code changes to be used. If using daemon mode, because mod_wsgi manages directly the processes handling requests and in which your WSGI application runs, there is more avenue for performing automatic source code reloading. As a consequence, it is important to understand what mode your WSGI application is running in. If you are running on Windows, are using Apache 1.3, or have not used WSGIDaemonProcess/WSGIProcessGroup directives to delegate your WSGI application to a mod_wsgi daemon mode process, then you will be using embedded mode. If you are not sure whether you are using embedded mode or daemon mode, then substitute your WSGI application entry point with:: def application(environ, start_response): status = '200 OK' if not environ['mod_wsgi.process_group']: output = u'EMBEDDED MODE' else: output = u'DAEMON MODE' response_headers = [('Content-Type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output.encode('UTF-8')] If your WSGI application is running in embedded mode, this will output to the browser 'EMBEDDED MODE'. If your WSGI application is running in daemon mode, this will output to the browser 'DAEMON MODE'. Reloading In Embedded Mode -------------------------- However you have configured Apache to mount your WSGI application, you will have a script file which contains the entry point for the WSGI application. This script file is not treated exactly like a normal Python module and need not even use a '.py' extension. It is even preferred that a '.py' extension not be used for reasons described below. For embedded mode, one of the properties of the script file is that by default it will be reloaded whenever the file is changed. The primary intent with the file being reloaded is to provide a second chance at getting any configuration in it and the mapping to the application correct. If the script weren't reloaded in this way, you would need to restart Apache even for a trivial change to the script file. Do note though that this script reloading mechanism is not intended as a general purpose code reloading mechanism. Only the script file itself is reloaded, no other Python modules are reloaded. This means that if modifying normal Python code files which are used by your WSGI application, you will need to trigger a restart of Apache. For example, if you are using Django in embedded mode and needed to change your 'settings.py' file, you would still need to restart Apache. That only the script file and not the whole process is reloaded also has a number of implications and imposes certain restrictions on what code in the script file can do or how it should be implemented. The first issue is that when the script file is imported, if the code makes modifications to ``sys.path`` or other global data structures and the changes are additive, checks should first be made to ensure that the change has not already been made, else duplicate data will be added every time the script file is reloaded. This means that when updating ``sys.path``, instead of using:: import sys sys.path.append('/usr/local/wsgi/modules') the more correct way would be to use:: import sys path = '/usr/local/wsgi/modules' if path not in sys.path: sys.path.append(path) This will ensure that the path doesn't get added multiple times. 
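The same consideration applies to any other state which lives outside of
the script file itself but which the script file modifies when loaded.
For example, if the script file were to add a handler to a logger from
the standard 'logging' module, which is interpreter wide, a guard should
be used so that reloading the script file does not add duplicate
handlers. The logger name used below is arbitrary::

    import logging

    _logger = logging.getLogger('myapp')

    # Only add the handler the first time the script file is loaded,
    # else each reload of the script file would add a further handler
    # and messages would be logged multiple times.
    if not _logger.handlers:
        _handler = logging.StreamHandler()
        _handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s %(message)s'))
        _logger.addHandler(_handler)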
Even where the script file is named so as to have a '.py' extension, that the script file is not treated like a normal module means that you should never try to import the file from another code file using the 'import' statement or any other import mechanism. The easiest way to avoid this is not use the '.py' extension on script files or never place script files in a directory which is located on the standard module search path, nor add the directory containing the script into ``sys.path`` explicitly. If an attempt is made to import the script file as a module the result will be that it will be loaded a second time as an independent module. This is because script files are loaded under a module name which is keyed to the full absolute path for the script file and not just the basename of the file. Importing the script file directly and accessing it will therefore not result in the same data being accessed as exists in the script file when loaded. Because the script file is not treated like a normal Python module also has implications when it comes to using the "pickle" module in conjunction with objects contained within the script file. In practice what this means is that neither function objects, class objects or instances of classes which are defined in the script file should be stored using the "pickle" module. The technical reasons for the limitations on the use of the "pickle" module in conjunction with objects defined in the script file are further discussed in the document :doc:`../user-guides/issues-with-pickle-module`. The act of reloading script files also means that any data previously held by the module corresponding to the script file will be deleted. If such data constituted handles to database connections, and the connections are not able to clean up themselves when deleted, it may result in resource leakage. One should therefore be cautious of what data is kept in a script file. Preferably the script file should only act as a bridge to code and data residing in a normal Python module imported from an entirely different directory. Restarting Apache Processes --------------------------- As explained above, the only facility that mod_wsgi provides for reloading source code files in embedded mode, is the reloading of just the script file providing the entry point for your WSGI application. If you don't have a choice but to use embedded mode and still desire some measure of automatic source code reloading, one option available which works for both Windows and UNIX systems is to force Apache to recycle the Apache server child process that handles the request automatically after the request has completed. To enable this, you need to modify the value of the MaxRequestsPerChild directive in the Apache configuration. Normally this would be set to a value of '0', indicating that the process should never be restarted as a result of the number of requests processed. To have it restart a process after every request, set it to the value '1' instead:: MaxRequestsPerChild 1 Do note however that this will cause the process to be restarted after any request. That is, the process will even be restarted if the request was for a static file or a PHP application and wasn't even handled by your WSGI application. The restart will also occur even if you have made no changes to your code. Because a restart happens regardless of the request type, using this method is not recommended. 
Because of how the Apache server child processes are monitored and restarts handled, it is technically possible that this method will yield performance which is worse than CGI scripts. For that reason you may even be better off using a CGI/WSGI bridge to host your WSGI application. At least that way the handling of other types of requests, such as for static files and PHP applications will not be affected. Reloading In Daemon Mode ------------------------ If using mod_wsgi daemon mode, what happens when the script file is changed is different to what happens in embedded mode. In daemon mode, if the script file changed, rather than just the script file being reloaded, the daemon process which contains the application will be shutdown and restarted automatically. Detection of the change in the script file will occur at the time of the first request to arrive after the change has been made. The way that the restart is performed does not affect the handling of the request, with it still being processed once the daemon process has been restarted. In the case of there being multiple daemon processes in the process group, then a cascade effect will occur, with successive processes being restarted until the request is again routed to one of the newly restarted processes. In this way, restarting of a WSGI application when a change has been made to the code is a simple matter of touching the script file if daemon mode is being used. Any daemon processes will then automatically restart without the need to restart the whole of Apache. So, if you are using Django in daemon mode and needed to change your 'settings.py' file, once you have made the required change, also touch the script file containing the WSGI application entry point. Having done that, on the next request the process will be restarted and your Django application reloaded. Restarting Daemon Processes --------------------------- If you are using daemon mode of mod_wsgi, restarting of processes can to a degree also be controlled by a user, or by the WSGI application itself, without restarting the whole of Apache. To force a daemon process to be restarted, if you are using a single daemon process with many threads for the application, then you can embed a page in your application (password protected hopefully), that sends an appropriate signal to itself. This should only be done for daemon processes and not within the Apache child processes, as sending such a signal within a child process may interfere with the operation of Apache. That the code is executing within a daemon process can be determined by checking the 'mod_wsgi.process_group' variable in the WSGI environment passed to the application. The value will be non empty if a daemon process:: if environ['mod_wsgi.process_group'] != '': import signal, os os.kill(os.getpid(), signal.SIGINT) This will cause the daemon process your application is in to shutdown. The Apache process supervisor will then automatically restart your process ready for subsequent requests. On the restart it will pick up your new code. This way you can control a reload from your application through some special web page specifically for that purpose. You can also send this signal from an external application, but a problem there may be identifying which process to send the signal to. 
If you are running the daemon process(es) as a distinct user/group to Apache and each application is running as a different user then you could just look for the Apache (httpd) processes owned by the user the application is running as, as opposed to the Apache user, and send them all signals. If the daemon process is running as the same user as Apache or there are distinct applications running in different daemon processes but as the same user, knowing which daemon processes to send the signal may be harder to determine. Either way, to make it easier to identify which processes belong to a daemon process group, you can use the 'display-name' option to the WSGIDaemonProcess to name the process. On many platforms, when this option is used, that name will then appear in the output from the 'ps' command and not the name of the actual Apache server binary. Monitoring For Code Changes --------------------------- The use of signals to restart a daemon process could also be employed in a mechanism which automatically detects changes to any Python modules or dependent files. This could be achieved by creating a thread at startup which periodically looks to see if file timestamps have changed and trigger a restart if they have. Example code for such an automatic restart mechanism which is compatible with how mod_wsgi works is shown below:: from __future__ import print_function import os import sys import time import signal import threading import atexit try: import Queue as queue except ImportError: import queue _interval = 1.0 _times = {} _files = [] _running = False _queue = queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr) print('%s Triggering process restart.' % prefix, file=sys.stderr) os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): while 1: # Check modification times on all files in sys.modules. for module in sys.modules.values(): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. 
try: return _queue.get(timeout=_interval) except: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except: pass _thread.join() atexit.register(_exiting) def track(path): if not path in _files: _files.append(path) def start(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting change monitor.' % prefix, file=sys.stderr) _running = True _thread.start() _lock.release() This would be used by importing into the script file the Python module containing the above code, starting the monitoring system and adding any additional non Python files which should be tracked:: import os import monitor monitor.start(interval=1.0) monitor.track(os.path.join(os.path.dirname(__file__), 'site.cf')) def application(environ, start_response): ... Where needing to add many non Python files in a directory hierarchy, such as template files which would otherwise be cached within the running process, the ``os.path.walk()`` function could be used to traverse all files and add required files based on extension or other criteria using the 'track()' function. This mechanism would generally work adequately where a single daemon process is used within a process group. You would need to be careful however when multiple daemon processes are used. This is because it may not be possible to synchronise the checks exactly across all of the daemon processes. As a result you may end up with the daemon processes running a mixture of old and new code until they all synchronise with the new code base. This problem can be minimised by defining a short interval time between scans, however that will increase the overhead of the checks. Using such an approach may in some cases be useful if using mod_wsgi as a development platform. It certainly would not be recommended you use this mechanism for a production system. The reasons for not using it on a production system is due to the additional overhead and chance that daemon processes are restarted when you are not expecting them to be. For example, in a production environment where requests are coming in all the time, you do not want a restart triggered when you are part way through making a set of changes which cover multiple files as likely then that an inconsistent set of code will be loaded and the application will fail. Note that you should also not use this mechanism on a system where you have configured mod_wsgi to preload your WSGI application as soon as the daemon process has started. If you do that, then the monitor thread will be recreated immediately and so for every single code change on a preloaded file you make, the daemon process will be restarted, even if there is no intervening request. If preloading was really required, the example code would need to be modified so as to not use signals to restart the daemon process, but reset to zero the variable saved away in the WSGI script file that records the modification time of the script file. This will have the affect of delaying the restart until the next request has arrived. Because that variable holding the modification time is an internal implementation detail of mod_wsgi and not strictly part of its published API or behaviour, you should only use that approach if it is warranted. Restarting Windows Apache ------------------------- On the Windows platform there is no daemon mode only embedded mode. 
The MPM used on Apache is the 'winnt' MPM. This MPM is like the worker MPM on UNIX systems except that there is only one process.

Being embedded mode, modifying the WSGI script file only results in the WSGI script file itself being reloaded; the process as a whole is not restarted. Thus there is normally no way, by modifying the WSGI script file or any other Python code file used by the application, of having the whole application reloaded automatically.

The recipe in the previous section can be used with daemon mode on UNIX systems to implement an automated scheme for restarting the daemon processes when any code change is made, but because Windows lacks the 'fork()' system call, daemon mode isn't supported there in the first place. Thus, the only way one can have code changes picked up on Windows is to restart Apache as a whole.

Although a full restart is required, Apache on Windows only uses a single child server process and so the impact isn't as significant as on UNIX platforms, where many processes may need to be shut down and restarted. With that in mind, it is actually possible to modify the prior recipe for restarting a daemon process to restart Apache itself.

To achieve this sleight of hand, it is necessary to use the Python 'ctypes' module to get access to a special internal Apache function which is available in the Windows version of Apache called 'ap_signal_parent()'. The required change to get this to work is to replace the restart function in the previous code with the following::

    def _restart(path):
        _queue.put(True)
        prefix = 'monitor (pid=%d):' % os.getpid()
        print('%s Change detected to \'%s\'.' % (prefix, path), file=sys.stderr)
        print('%s Triggering Apache restart.' % prefix, file=sys.stderr)
        import ctypes
        ctypes.windll.libhttpd.ap_signal_parent(1)

Other than that, the prior code would be used exactly as before. Now when any change is made to Python code used by the application or any other monitored files, Apache will be restarted automatically for you. As before, it is recommended that this only be used during development and not on a production system.

====================
Virtual Environments
====================

This document contains information about how to use Python virtual environments with mod_wsgi. You can use a Python virtual environment created using `virtualenv`_ and `virtualenvwrapper`_, or if using Python 3, the ``pyvenv`` or ``python -m venv`` commands.

The purpose of a Python virtual environment is to allow one to create multiple distinct Python environments for the same version of Python, but with different sets of Python modules and packages installed. It is recommended that you always use Python virtual environments and not install additional Python packages directly into your Python installation.

A Python virtual environment is also required where it is necessary to run multiple WSGI applications which have conflicting requirements as to what version of a Python module or package needs to be installed. They can also be used when distinct mod_wsgi daemon process groups are used to host WSGI applications for different users and each user needs to be able to separately install their own Python modules and packages.

How you configure mod_wsgi or set up your WSGI application script file for a Python virtual environment will depend on your specific requirements. The more common scenarios are explained below.
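For the examples given below, a Python virtual environment might have been created using commands along the lines of the following. This is only a sketch, with ``/usr/local/venvs/example`` simply being the location used for illustration throughout this document::

    # Using virtualenv (works with both Python 2 and Python 3).
    virtualenv /usr/local/venvs/example

    # Or, using the venv module bundled with Python 3.
    python3 -m venv /usr/local/venvs/example

Any additional Python packages a WSGI application requires would then be installed using the ``pip`` found in the ``bin`` directory of that virtual environment.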
Location of the Virtual Environment ----------------------------------- Whichever method you use to create a Python virtual environment, before you use it with mod_wsgi, you should validate what the location of the Python virtual environment is. If using `virtualenvwrapper`_ this may be a non obvious directory hidden away under your home directory. The way to determine the location of the Python virtual environment is to activate the Python virtual environment from an interactive shell so it is being used, and then run the command:: python -c 'import sys; print(sys.prefix)' This will output the directory path you will use when setting up mod_wsgi to use the Python virtual environment. For the purposes of the examples below, it is assumed the location of any Python virtual environments are under the ``/usr/local/venvs`` directory. A specific Python virtual environment may thus return for ``sys.prefix``:: /usr/local/venvs/example Note that this should be the root directory of the Python virtual environment, which in turn contains the ``bin`` and ``lib`` directories for the Python virtual environment. It is a common mistake when setting up a Python virtual environment with mod_wsgi to use the full path to the ``python`` executable instead of the root directory. That will not work, so do not use the path for the ``python`` executable as the location of the Python virtual environment, it has to be the root directory. Do be aware that the user that Apache runs your code as will need to be able to access the Python virtual environment. On some Linux distributions, the home directory of a user account is not accessible to other users. Rather than change the permissions on your home directory, it might be better to consider locating your WSGI application code and any Python virtual environment outside of your home directory. Virtual Environment and Python Version -------------------------------------- When using a Python virtual environment with mod_wsgi, it is very important that it has been created using the same Python installation that mod_wsgi was originally compiled for. It is not possible to use a Python virtual environment to force mod_wsgi to use a different Python version, or even a different Python installation. You cannot for example force mod_wsgi to use a Python virtual environment created using Python 3.5 when mod_wsgi was originally compiled for Python 2.7. This is because the Python library for the Python installation it was originally compiled against is linked directly into the mod_wsgi module. In other words, Python is embedded within mod_wsgi. When mod_wsgi is used it does not run the command line ``python`` program to run the interpreter and thus why you can't force it to use a different Python installation. The problem in trying to force mod_wsgi to use a different Python installation than what it was compiled for, even where it is the same Python version, is that the Python installation may itself not have been compiled with the same options. This is especially a problem when it comes to issues around how Python stores Unicode characters in memory. The end result is that if you want to use a different Python installation or version than what mod_wsgi was originally compiled for, you would need to re-install mod_wsgi such that it is compiled for the Python installation or version you do want to use. Do not try and use a Python virtual environment from one Python installation or version with mod_wsgi, when mod_wsgi was compiled for a different one. 
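If you are not sure which Python installation your copy of mod_wsgi was compiled against, one way of checking on Linux systems is to look at which shared libraries the mod_wsgi module is linked against. This is only a sketch; the path to the ``mod_wsgi.so`` file will differ between distributions and Apache installations::

    ldd /usr/lib/apache2/modules/mod_wsgi.so | grep libpython

If the Python library was statically linked into mod_wsgi nothing may show up, in which case the Apache error log can be consulted instead, as mod_wsgi reports the Python version it was compiled for when Apache is started.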
Daemon Mode (Single Application)
--------------------------------

The preferred way of setting up mod_wsgi is to run each WSGI application in its own daemon process group. This is called daemon mode. A typical configuration for running a WSGI application in daemon mode would be::

    WSGIDaemonProcess myapp

    WSGIProcessGroup myapp
    WSGIApplicationGroup %{GLOBAL}

    WSGIScriptAlias / /some/path/project/myapp.wsgi

    <Directory /some/path/project>
        Require all granted
    </Directory>

The ``WSGIDaemonProcess`` directive defines the daemon process group. The ``WSGIProcessGroup`` directive indicates that the WSGI application should be run within the defined daemon process group.

As only the single application is being run within the daemon process group, the ``WSGIApplicationGroup`` directive is also being used. When this is used with the ``%{GLOBAL}`` value, it forces the WSGI application to run in the main Python interpreter context of each process. This is preferred in this scenario as some third party packages for Python which include C extensions will not run in the Python sub interpreter contexts which mod_wsgi would use by default. By using the main Python interpreter context you eliminate the possibility of such third party packages for Python causing problems.

To modify the configuration for this scenario to use a Python virtual environment, all you need to do is add the ``python-home`` option to the ``WSGIDaemonProcess`` directive resulting in::

    WSGIDaemonProcess myapp python-home=/usr/local/venvs/myapp

All the additional Python packages and modules would then be installed into that Python virtual environment.

Daemon Mode (Multiple Applications)
-----------------------------------

If instead of running each WSGI application in a separate daemon process group as is the recommended practice, you are running multiple WSGI applications in one daemon process group, a different approach to using Python virtual environments is required.

For this scenario there are various ways the configuration could be set up. If mounting each WSGI application explicitly you might be using::

    WSGIDaemonProcess myapps

    WSGIProcessGroup myapps

    WSGIScriptAlias /myapp3 /some/path/project/myapp3.wsgi
    WSGIScriptAlias /myapp2 /some/path/project/myapp2.wsgi
    WSGIScriptAlias / /some/path/project/myapp1.wsgi

    <Directory /some/path/project>
        Require all granted
    </Directory>

If instead the directory containing the WSGI application script files is being mounted, you might be using::

    WSGIDaemonProcess myapps

    WSGIProcessGroup myapps

    WSGIScriptAlias / /some/path/project/

    <Directory /some/path/project>
        Require all granted
    </Directory>

The use of the ``WSGIDaemonProcess`` and ``WSGIProcessGroup`` directives is the same as before, however the ``WSGIApplicationGroup`` directive is not being used. When the ``WSGIApplicationGroup`` directive isn't being used to override which Python interpreter context is used, each WSGI application will be run in its own Python sub interpreter context of the processes. This is necessary as WSGI application frameworks (Django being a prime example) often do not support running more than one instance of a WSGI application using the framework in the same Python interpreter context at the same time.

In this scenario of running multiple WSGI applications in the same daemon process group, more than one change is possibly required. The changes required depend on whether or not all WSGI applications should share the same Python virtual environment.

If all of the WSGI applications should share the same Python virtual environment, then the same change as was performed above for the single application case would be made.
That is, add the ``python-home`` option to the ``WSGIDaemonProcess`` directive::

    WSGIDaemonProcess myapps python-home=/usr/local/venvs/myapps

All the additional Python packages and modules that any of the WSGI applications require would then be installed into that Python virtual environment. Because it is a shared environment, they must all use the same version of any specific Python package or module.

If instead of all WSGI applications using the same Python virtual environment each needed their own, then a change will instead need to be made in each of the WSGI script files for the applications. How this is done will depend on how the Python virtual environment is created.

If the Python virtual environment is created using `virtualenv`_ or `virtualenvwrapper`_, the WSGI script for each application should be modified to include code of the following form::

    python_home = '/usr/local/venvs/myapp1'

    activate_this = python_home + '/bin/activate_this.py'
    execfile(activate_this, dict(__file__=activate_this))

Note that ``execfile()`` only exists in Python 2. If the application is running under Python 3, the equivalent is to read the contents of the ``activate_this.py`` file yourself and pass them to ``exec()``.

Because each WSGI application is to use a separate Python virtual environment, the value of the ``python_home`` variable would be set differently for each WSGI script file, with it referring to the root directory of the respective Python virtual environments.

This code should be placed in the WSGI script file before any other module imports in the WSGI script file, with the exception of ``from __future__`` imports used to enable Python feature flags.

It is important to note that when the Python virtual environment is activated from within the WSGI script, what happens is a bit different to when the ``python-home`` option to ``WSGIDaemonProcess`` is used.

When activating the Python virtual environment from within the WSGI script file, only the ``site-packages`` directory from the Python virtual environment is being used. This directory will be added to the Python module search path, along with any additional directories registered using ``.pth`` files present in the ``site-packages`` directory. These will be placed at the start of the existing ``sys.path``.

The consequence of this is that the Python virtual environment isn't completely overriding the original Python installation the Python virtual environment was created from. This means that if the main Python installation had additional Python packages installed, they will also potentially be visible to the WSGI application.

That this occurs could cause confusion. You might for example think you had all the packages you require listed in your ``requirements.txt`` file for ``pip``, when in fact one was missing and so was never installed into the virtual environment. If that package was installed in the main Python installation, it would be picked up from there, but it might be the wrong version and have dependencies on versions of other packages which differ from those installed in your Python virtual environment, and which are then found instead of the versions in your virtual environment.

To avoid such problems, when activating the Python virtual environment from within the WSGI script file, it is necessary to still set the ``python-home`` option of the ``WSGIDaemonProcess`` directive, but set it to an empty Python virtual environment which has had no additional packages installed::

    WSGIDaemonProcess myapp python-home=/usr/local/venvs/empty

By doing this, the main Python installation will not be consulted and instead it will fall back to the empty Python virtual environment.
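Such an empty Python virtual environment is created in the same way as any other. A minimal sketch, assuming `virtualenv`_ is being used and that the location matches the ``python-home`` option given above::

    virtualenv /usr/local/venvs/empty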
This Python virtual environment should remain empty and you should not install additional Python packages or modules into it, or you will cause the same sort of conflicts that can arise when the main Python installation was being used.

When needing to activate the Python virtual environment from within the WSGI script file as described, it is preferred that you use either `virtualenv`_ or `virtualenvwrapper`_ to create the Python virtual environment. This is because they both provide the ``activate_this.py`` script file which does all the work of setting up ``sys.path``. When you use either ``pyvenv`` or ``python -m venv`` with Python 3, no such activation script is provided.

So use `virtualenv`_ or `virtualenvwrapper`_ if you can. If you cannot for some reason and are stuck with ``pyvenv`` or ``python -m venv``, you can instead use the following code in the WSGI script file::

    python_home = '/usr/local/venvs/myapp1'

    import sys
    import site

    # Calculate path to site-packages directory.

    python_version = '.'.join(map(str, sys.version_info[:2]))
    site_packages = python_home + '/lib/python%s/site-packages' % python_version

    # Add the site-packages directory.

    site.addsitedir(site_packages)

As before, this code should be placed in the WSGI script file before any other module imports in the WSGI script file, with the exception of ``from __future__`` imports used to enable Python feature flags.

When using this method, do be aware that the additions to the Python module search path are made at the end of ``sys.path``. For that reason, you must set the ``python-home`` option to ``WSGIDaemonProcess`` to the location of an empty Python virtual environment. If you do not do this, any additional Python package installed in the main Python installation will hide those in the Python virtual environment for the application.

There is extra code you could add which would reorder ``sys.path`` to make it work in an equivalent way to the ``activate_this.py`` script provided when you use `virtualenv`_ or `virtualenvwrapper`_, but it is messy and more trouble than it is worth::

    python_home = '/usr/local/venvs/myapp1'

    import sys
    import site

    # Calculate path to site-packages directory.

    python_version = '.'.join(map(str, sys.version_info[:2]))
    site_packages = python_home + '/lib/python%s/site-packages' % python_version

    # Remember original sys.path.

    prev_sys_path = list(sys.path)

    # Add the site-packages directory.

    site.addsitedir(site_packages)

    # Reorder sys.path so new directories at the front.

    new_sys_path = []

    for item in list(sys.path):
        if item not in prev_sys_path:
            new_sys_path.append(item)
            sys.path.remove(item)

    sys.path[:0] = new_sys_path

It is better to avoid needing to manually activate the Python virtual environment from inside of a WSGI script by using a separate daemon process group per WSGI application. At the minimum, at least avoid ``pyvenv`` and ``python -m venv``.

Embedded Mode (Single Application)
----------------------------------

The situation for running a single WSGI application in embedded mode is not much different to running a single WSGI application in daemon mode. In the case of embedded mode, there is though no ``WSGIDaemonProcess`` directive.
The typical configuration when running a single WSGI application in embedded mode might be::

    WSGIScriptAlias / /some/path/project/myapp.wsgi
    WSGIApplicationGroup %{GLOBAL}

    <Directory /some/path/project>
        Require all granted
    </Directory>

The ``WSGIDaemonProcess`` and ``WSGIProcessGroup`` directives are gone, but the ``WSGIApplicationGroup`` directive is still used to force the WSGI application to run in the main Python interpreter context of each of the Apache worker processes. This is to avoid those issues with some third party packages for Python with C extensions as mentioned before.

In this scenario, to set the location of the Python virtual environment to be used, the ``WSGIPythonHome`` directive is used::

    WSGIPythonHome /usr/local/venvs/myapp

Note that if the WSGI application is being set up within the context of an Apache ``VirtualHost``, the ``WSGIPythonHome`` directive cannot be placed inside of the ``VirtualHost``. Instead it must be placed outside of all ``VirtualHost`` definitions. This is because it applies to the whole Apache instance and not just the single ``VirtualHost``.

Embedded Mode (Multiple Applications)
-------------------------------------

Running multiple applications in embedded mode is also similar to running multiple WSGI applications in one daemon process group. You still need to ensure each WSGI application runs in its own Python sub interpreter context to avoid potential issues with Python web frameworks that don't allow more than one WSGI application to be using them at the same time in a Python interpreter context.

If mounting each WSGI application explicitly you might be using::

    WSGIScriptAlias /myapp3 /some/path/project/myapp3.wsgi
    WSGIScriptAlias /myapp2 /some/path/project/myapp2.wsgi
    WSGIScriptAlias / /some/path/project/myapp1.wsgi

    <Directory /some/path/project>
        Require all granted
    </Directory>

If instead the directory containing the WSGI application script files is being mounted, you might be using::

    WSGIScriptAlias / /some/path/project/

    <Directory /some/path/project>
        Require all granted
    </Directory>

In this scenario, to set the location of the Python virtual environment to be used by all WSGI applications, the ``WSGIPythonHome`` directive is used::

    WSGIPythonHome /usr/local/venvs/myapps

If the WSGI application is being set up within the context of an Apache ``VirtualHost``, the ``WSGIPythonHome`` directive cannot be placed inside of the ``VirtualHost``. Instead it must be placed outside of all ``VirtualHost`` definitions. This is because it applies to the whole Apache instance and not just the single ``VirtualHost``.

If each WSGI application needs its own Python virtual environment, then activation of the Python virtual environment needs to be performed in the WSGI script itself, as explained previously for the case of daemon mode being used. The ``WSGIPythonHome`` directive should then be used to refer to an empty Python virtual environment, if needed, to ensure that any additional Python packages in the main Python installation don't interfere with what packages are installed in the Python virtual environment for each WSGI application.

Adding Additional Module Directories
------------------------------------

The ``python-home`` option to ``WSGIDaemonProcess`` and the ``WSGIPythonHome`` directive are the preferred way of specifying the location of the Python virtual environment to be used. If necessary, activation of the Python virtual environment can also be performed from the WSGI script file itself.

If you need to add additional directories to search for Python packages or modules this can also be done.
You may want to do this where you need to specify the directory the actual WSGI application code is located in, such as where a WSGI script file needs to import application specific modules.

If you are using daemon mode and want to add additional directories to the Python module search path, you can use the ``python-path`` option to ``WSGIDaemonProcess``::

    WSGIDaemonProcess myapp python-path=/some/path/project

This option would be in addition to the ``python-home`` option used to specify where the Python virtual environment is located.

If you are using embedded mode, you can use the ``WSGIPythonPath`` directive::

    WSGIPythonPath /some/path/project

This directive is in addition to the ``WSGIPythonHome`` directive used to specify where the Python virtual environment is located.

In either case, if you need to specify more than one directory, they can be separated using a ':' character.

If you are having to activate the Python virtual environment from within a WSGI script and need to add additional directories to the Python module search path, you should modify ``sys.path`` directly from the WSGI script file.

Note that prior practice was that these ways of setting the Python module search path were used to specify the location of the Python virtual environment. Specifically, they were used to add the ``site-packages`` directory of the Python virtual environment. You should not do that. The better way to specify the location of the Python virtual environment is using the ``python-home`` option of the ``WSGIDaemonProcess`` directive for daemon mode, or the ``WSGIPythonHome`` directive for embedded mode. These ways of specifying the Python virtual environment have been available since mod_wsgi 3.0 and Linux distributions have not shipped such an old version of mod_wsgi for quite some time. If you are using the older way, please update your configurations.

.. _virtualenv: http://pypi.python.org/pypi/virtualenv
.. _virtualenvwrapper: https://pypi.python.org/pypi/virtualenvwrapper
i:i®yÊWjË)ÙxÒ°BÔp‰ìäÃ)¹\ªf]äêûJ8ÁDÒ奛-k€B+Õ mKÕèúIÌ9*ã(¶´QAJðIQ‰ÖÁýáÇÂH*°îevâ+ N5//¬GÜpîD¥Ý(çš2”¼£RüYe~$øÃó©î ÃaM}mR0R’¬"IM^Ü´jï&„b@N0ÜÈm›+E° ]Úˆ} 3>ie¹~Q©öu°…J¥Rùœí4ßHË®¯ƒ]Ü…uø¾ê˜ÉŠmÙI'„8îóÛÙe[ég—njÝÛȲ½—«¬Œa 0ãJá7–y¤aš.$Å…Ú²òJU° ˆc2ì¶’7V¸Cª¤Í zS¾°ÛiúÜbù¶…¥ª€è4*Á ˜w.4Ç¡î¢;ÀDäó³zUH%JvÓ™‡lO $¨hVŽ~ßðòYþãuËA#IõvÙ!;ÿ¸š~ûÍ Z+ÙZú´á ±FeY¦tRÒª3içݨ»véy¼üØòÜ7/4‘wSçHÒ3êÎtz‰Ÿ2EÛ&ò¹½^®z ¿Þ<Ës&BåÇVäÑ[À)Æ›.µz§¨ç>mðÝŠÍLWŒæ¦œþpv«*pƒ³9ÝXeÆ-vV^Â€m±ÅÛ£C„2´Ì;xÉ´º¸*ÉÂÊfÉì¥w A±/þ\ßt‚žøLN%%›.æ;4‰fzÚ\UŸ°çå÷{£'>€»Õ8€çó-#a3)”HYAq?d«p&œ‘ CÌÙ.©£°%~ä˜Ê¬œ¡/Æ£}iñŒ–ñý*œíëóxõBÝFÓøUáªjm|L!µí²AüT¦1/2ô²ì]^3´Óq5Â/ i´Š¼6F ¡‡²e§¸Hré;ð‰lÆÉÓóÕjæÍñµ²Êü ¸ã®­„gu½tÔ =f‰=DÂ$íë«ßwߺ@?y¼FêD¤£’ìÙ±El´Ÿrˆ‰‰Æe¥›QYHsSˆ¼ý‰4ëa_¬ •sxCåF_Õø’7˜•@~C„iWD\ ›½†ƒ}H³/.›W/-<ö ©|ÀõæßHCÏLM¸n’nõê¶Å;UŸª°²Óz÷c¡m«º£Ê7-Çy°ÿ$X¢Òq†C|K•$Þjg³_ÅJuÒ&¦J- $»²£~ŒbY>§]¤Ï"µÄU>Ô-ö×rŸqGº“ Ϧ8¨½éÍÞ¤~•{ê‡âOæ‡ìëÖÕ9v¡—}ÍÖKó™aŸúˆ=Õ*—qÖoÛQ+é§xˆnMêÒÚ«óëÑ Ë¨3aÄÔì$„9äü¢æ|ÜÅÌ¿(…+U{1ÿ·¥ýsʼ 9äô·Öà|#,ËMKý$V£ã£²¿á’&8.R“xê¥ôz¸«6MNÀ c%Ѿ7ïvå£Èî8ôÅ\IºÚFŽ­1•²YR¯˜P-rŠ’ee*öc,K±0›§H±ÐNÁÚód5Ìšàšœ!¼¤‰D=’=Ðë“ÓòÕi Hæ!;ÈŒžVeüè)é& ïë sæßÒÊq6ª§¦˜FS—hKçÝl£'N]‡k˜=LJÌ%ÿFªôÕû12@ôJAÿ¨Í†¤V Wt› ‘”d¤\õ._¯òÄ…ór—ŽÏ%hÚP½Ök„8‡T›A<_´‘¼û¡o×ènŒ©, Í–T­š;ÔŒ›,¶¥®Ö›+ÙTœRH‡Þ»Ö@ꢻµù¬ptÌ~±yv­¹ýÑ<ûͦÒZvÚGæ¬IºTÍ—-lªO¼ˆÊ£6Zp(ó+ãN¾™o>ÚÆÀãKöµ'JÒ½U¾/z‘ ·\Ó­çqÍJÑë]šuÂTâ5°Q»y­µÍÙð‚ë@TºÐóù£)e WW(„¦›×6T,õ‚z„7å´«6!ås¶±ÞH‰ü´ÓíXà…µs8“¸˜–Ë«fá¶ïÎßå·§uc +Ü!¼¹=ubù6¾ÁõõDÌäâ¥üìýÏCn‘Ú„ðË«33A—o53-6ºÔµHPi”Û 8èÚV7 .FçÒ8ßuJ÷[ G¹t›½U;i^ªÄÓnX¼¼%;F||"B]V¸c®Ù_ Ÿ~ˆxÞæöû•‡Ò”Lq#ñmZªvƒ]Ñ-+6­C­ À„ q¦]_?š™Kk°³FvМ'vppo4š¿É¥14N0•­k»ûPcJ.]wn¬¥{·Š- `(ò’cHËÏÌqM{Ç#eÌù=!:…Ú}Éfï4Öõíìç7¥BóÖ<™ò‰Ç¸=Rä¤ûJ9¾ÔyDìÊ—d.éžU¤ûc+e%we\}ƒ>"©Æ¥í1*W¤4ê'p‰3&¹k¹t¡jÙ«ŠìŒc)¿,%ª–‡0®àLe™©K@X{`#~ŒbBt¡»¥É¡Þ1‰’Û¬ØQϰÆXaRãVõþNcíVÎ0Ûî9ª•·Ð…xCa‰eY»[ŠÙj¾úBß2š|ä!Ô-VxÛ(|)òHœ×Yo©^àa”&œðÙèW„L:»›,Ô«fô‰^2/̧ÃN4m.í!E{,«›×Jc ­pz¸ýËPø2éµBêvÐûÄK7Vl hÊ, ³fÚ­l²±î¤K²Út¼‘÷„Nå,Ÿ)èØ/uĶX–•Ó*ã øDß”(wRBï¡iñÒ/Öˆg›æœ¡”_\åØÚB¼"ÒúðÛûY÷Á@Rh/‰ëà6µqºèKŸCza¹kbÒÖBvçþðÚV.ïXWœÔ²/+ÿnÕ:á-ºû÷è—ù-6xf“ÒÊîÜW³1Å5¥!æø"‘÷ˆ¦úB€aÐÊ‹ÛjßJD»wÝ/2ù'ÇF0Ë•ݸVµì(YÆ”…»w¯QÐ » Ä›6xª9+ËQØQ ö`I‰¦3)Mõ({¡El‹@z~0†m»x¼ÈÚ|4á -™fl•^+ayüõî¢ :`4 m‡8»h¼-ºè„(9©SÔFð ~´‹cͽ³F:1‡S(¶çœwf¶9Æ0Ù=q:ò¬Ûú;baÒó¶ôñòG)+ô;òe_åŸ%Öï²Ý®Ó“O4íûn—Í_tLe¹Ü­+våP9v…p5Âel¢ÛŠRÝØA8èÆ%Ú~Í»9¶Ônµí/“ˆñ†xD²îü}–†úÓ3S®ÂÇ™Ûh¬K¸néKEæËißZc ¾´½hµA·1ÜkN™ŸHnº=ÚÄßšvÒüâvéñ0ãsm꾎ÈTëÒ4u»gí%]Ò­ð©†^Ò±Ø|!*)zÚ¨¶£Übc(¶#°¸{)´—­7Bžƒà 7<㈼C.m-8œp„M¿:»l!¶œ ÂJ‘™s퉎M•h$£Ú„åi|Ÿ¬ozR¿ËQ%öîšd){(R;TqÓ3hôm×ìÑ[Œ=3•fŠß²¦™·Tžªú¡ú͔̒vS¼@„K¹jÅTê¶ŒM0©Y¹tÚRê6ܬîI0þLPú¤ðrJÂ//Ey¨wVi·a^u<À§¼?$3æ¥1ʲ¤û*XC23kÕ™i#¼7“ -W7H¯ek„5/:ú/-²¢ÂÏb •„?"„7tç6R˜œØÇœi®4ò%PÞ¹U§R-Ô 7t‹ÄŽ.Ý?ùaÍÕZ9ê7ÃÒÍ»1ŸÍt{µ‰q7(+0´Î·TÁEüÆqwÐÏz†ïýW}u*kT^ôÓú©†õócº±—&-X ãlÏý±ÿcÉw”™åËþKJhã·ÇÈT2ÉÙˆñ‰6’–l;(al¿t=˜\»MËñw YF§„)d¢Øtßl5š a3K•Ôó¿<ôŽvõ¤Ë.ÎÛÄ~hUjÕOÎ'&Oꉬ¢¹dÚ±xžšá̸to0³™Ê¹3É¡>Þt{Pú 5£Ðî»ëVm\ù±‡Lº¬Öõ[>†å6›A|;T Ü Í:Í•M8ÂyÊAìÓ„%‡ôehêRwÃ҆ŷ¦TÙERÆ ºŸ6iSö“ï0üŸóViüúwg‰d¤ÿ–lKýº+uaImÉaUW¤â¨Ó ýU†erdÌÇ¡S]t‡R»öÚ®USÝ$+á¿/éQø¼BJ•%ç›úÑœö ¯~U©9~#…ߺ¯,¡™lÞŸ“d÷µq†ä¦“¦h~á ³Âwè×°xèÆ.hÞ-Ò %jî¥CMÄ«umâêþ¨¥`v©!8ÂæÑ<õ²…6ŸùkÜXDŠ–xêý_•Zû#„>Ø”]–üë;~>ûm8õ¥¤íH*ÁŒ2Û zÒÍöUîMp‰¥%ÉË´µ´‹8*Îè)CÖ”¶£p5†¸"´4³× Ê%rv’š½²žó›K3Êô“C©%[‡%$o¢áäQDn¤~áOZ°¢›´Â”kn®è/É¥VQ(·•°šwéªAfˈ¢¶R¸Šˆ (½j™¶ÔxÖJ¡Å~þXC'÷ °7‘ ¼¶õÚ§aÝXY·«Ÿ ñ6¤¥6•™;h|#*Np‰ŒÆ­íϸŠáþÆOƒNË?ZÞA4ä“E`LJyÄÛFqó¶›Zu›³ÝL!*²Úª®‚7€!å/ñ0îm0ò•7«Äè†%Ô¯H,ý¥%;Ì,¶Ud7B¼)h¡6”•¶ÊŽìðòÊQxØU¶º€Ã­Þ³dëlÍý±…6 ·!fó“Ewˆ³ŒL!ù(–CˆúËm“ÙnÖ›ÖµYô©Ô¡²ë®ÞY²µNê×àéqëK^xàÀËùÑsÐmw-C’…^L¸_?V€R1c •}ÍyÖÙù扶½\ýGÞ;!r¨CPòxO"Ú{Õ±íE‡–ã´/ù6ÑÞµgàêBm"ͶÑù« žªÚû3hU8Ä»a§­¾R¤í O¸Ö0…;t›èW„]³fáž3¼›*Œ:xê¡°àéI÷Ò\°Y«e¶Î6©Ÿø[ ¤;ÇŒ,pu]ÿò¹ƒí¤”{Q*Óë²ûJloT*X =bÏz‘,vfSoeÚ÷Ù¦0Ê]jZ²è/Ž~. 
²bÓÈÔBû\^ý˜“},zCo¤î…ÏUwm$[ت7‹–F16åäçÍ}š«¹j?F6ÿ¢™Z¾ë‰ï$Dœš×ªŽÚ'¼D5(†Ù°±dì¥wTD™i†÷ížøL4’‘RÐpt§Þc„¥½TDÊ‚UdëlùÍ ªÛÖ’Ø#nmÇ<4Ê"`„êgÃ|L¼…z[ê³Þ¤JÊ?nmðëöt a*c/e+¦î’æÏŽŒaUVŸö|œÊw­”¾>Ìûôc:m éÛóž•­ÅÛ›ûÄÊl³d¸ml¡þÐÐ/"ðêm9°9ð†‘v›J¨NÚº¦&Ò‡=;¶~Í}ÀÀe%ëcWl,Ìl‰„Úû ü´„ ä®Ãï%-m¥¬S„-aµY]B¶PÀˆ˜•Q]ë) sí$b¢!¦ÚV‡÷JV;é™ ½U“Ú7ÂZ œºM [kMô‰¶RÖ€ ¼!»nKñ‡¿t%»fË™•³ûD×uwj—[KÙ_x4ƘWŽêÃ’²×Ö®—gm¡ºµ…¶Uèµ×g½Há(àÖ¸´ùçøD¢‹nÔ;²•ÄTcJËõ•øÓù¾sÃ2Í.c‹Ÿ¨ðÊ”[S@=²Ò6©Œp0Ó7ë]âùâJY¶ó¥Â÷Qà!æ‘ÏØ|!Ô$*Éw³<5&Û ½qËÇ: ·!Öoô¬µÑð¬M™¥zI²ÏC.«º…C ÕèrÓé]†çgm… AÂ2ˆuÕÝ¢t6½§wئ0%ÖWvÆÙTŒtc¹)kE¹Ã~öÄ©)Ä1…K˰‹`Þ½°ƒ¼Šc ÙF¢ìæ}kU„·v­‚›ëLbbiHE⛢6éÀg þQɶÝSœvo¤8ÜM!N=i %;to¡‰6tÓ´xï‘è³á¾âÊ´g€†[EâͶ„à8Eð˜õÝôWÜ!õ¸µÝ§:¶fœØÄëÁ”^DmÑÏ„NÍ.iëf¤møµ!:¹GmBz÷ ñ’çRôµmÔu³ãÂõýðAZì9æYÚ(pENá76nÅ®jÙ¡rzê=„Ef=»ª!m¥½j wV@{LZ˜óØwDœ²¤r¾èS%„ÛºM¡H;”wBÜ»U”7ÏT9,†Ù¿Kœx[Soi›OáW„™7ç:so‰b Ž÷߫ӊjœa♬Π31ɨïcÚ‡P¤éh¼h›O 'm û¡F—K:=‡ÂûŠvè:›¿ùJÓ ^¹¨È?yyè|ýÕ$o"$fWÂ=ïrPNj.ÇjwÒueÛ¡èû7ÆOãhNjޤL8~ª&%ÓÂ5Î0×›M¤yäí9»Ô‹)µf©®ËHñ‡’éô³)(W„K7ú¿ÏŒ‡˪×R›ÿP"&ÑeËi½ÙTï˜ÂHjøë›)ônW®›âe$iÞ=Љr4¬Å´ßÚÏO²¯¦—`5V¶š'Ðá aCÓ&ã …ÿ¦Uˆi›)sAd†¬Ó²£}i ³Ä·4å·¶ ïÑŒM6-ÝÞ½*8HmäK³fÀpì#äEÐu6•£oÎxªøGšj×AOŒ%µ³ b²*,öÓ?É‚¢tÿ¸ã#R2.R*óKX ØtvèÆ~aÊía¹”ò°>ÂÖæ²7ES·Ù0™› ºQ£}ÝhßZj„m¨È0ìÉäÔøÁ£ïZ»;s{á\_GŸçž‘Á‘JÞfûÑd•Yf…[*Þ BÙ»]…¬†i¦Š8NvŸŸí¿Ja Òáêîmñ¥>Ò|`:¦l6{a¦ó›y’aN¿ Ã)+fÊs«eGö„(JLq<ïR†úB·ªk†úBÖNˆjð®Àô[m'q5Ây@UªžÑ¾†ìÉÑ0ŸÀ¯(¦—10ÒPt;‚¼"‰±n¾wgÇF0„SK‡mL§J°>áÌrý•øBæ%˜ô{î©=ð˜DÓCH‡Â?H#äáîòhñŽüÛàe ÔøÇ <ˆá«™»>×v°ÜßòÃo71é•sÐ » ÃsÈeÛ ê.þÐþðŒ°9¢g+WBùùñ‡P/­z¶ü煮Ƥ[¼ÔφúC!¤)ÇT–БU)F€¼“ Dö_˜|¥üËZ-SÎ+ž¿B¼Üa·ýÕ`ZI¶…”«š±“2íEÜ×}e »¡F¹"ÐP#š°“DÚ:»~sŦ9X+òÂÃgAÀÂØq½EW ˆ\²Î‚¬aAÔ¦Ûi%;to‚¥ )_àWå„‚´Q"§³×ÏH[iCÖá³¶ŠÝJÄó§êëÑŸt 0…¦‚Ö¥éRGxˆ<_OÅö»µ…-A‹à½s¼û£„ØÕ;ü"ju,½iÄ„§nÕ0ëã÷&½Dw€lóõ¦›®G¸Gé5pë‹>n/Ìan¸t GŒ0Vl)ñ€+¡[à‚9» RºÚt×t£¢8û7AZ†˜½s•«?DÛÃOÎ0'Ü»ÄxÂr W¤Æ›á©æ•¡Ïe_–-/Ui=b–a.^–—wÊ Àp‹·¬¢:ëº{ëx¸÷k9Ç»Ïkeýâ³ãÙE‰‚¯ Út­[Á´è€ÎOLO8\}uÏÅ@Ì„s%>óœÿ¼¬%˜ÉùméCE(­<ÿÉùm‰–l…$«aw€ÅÌz(wV ”4×eÇYÕ½$ñÃÜú˜áj¹³t+²£ÆììÈÒû=©…eitú2ÝPþ˜^Sc›°øAÊÍ5¢Žô¥_Ô‘ Ÿ®…W¨óÇénaØa .Ꭿ_ñmͦ/\…)$DÚ½uš 6a #L:°hž.߆˜”jª¿w2ŽsëÝXÍ>ˆ²!¦ë¢¡¶š0ŒÚ`è…ˆƒ‚“ ‘0·l›Îv|bXM©Ë —¼Ú+O_¯2q‰f¸;6]Õ°çÆÄÚFtçÏ£}!@óí!ÔÙC¼1Œ¯–89°Óˆ[¼”¨*Éþ{5§Ù$sŽ¸ê­¸²µm>ퟰ6êÛóJFÇÂ/¿áŒ<`æÓ¾&奛Eµ¶Ð8V(vüöE¡đêÛ"ÉùŸd/D[š*¨™Ma³@|Ÿ}d³dkløè†…ÙÍ!9¿¸€ºè;áO”ú<øo…¹]hB×_ðlˆP*ўɬGŒY;4BP¥jŠá¾83œƒó×&¸tÄ®GuÇ‚–Úé¶É÷Ã)’ɨ²JÏ.Á®èvÝ›_Gnhk;V~–Û~hœÊ’²#õ‡€VÒxΞ„ ã¤ÐsÆUò‚b|–Ù¬¼¾À|ãœëPÐ!&›J¿c•œ™’vúUå²½©9”6-&©XæP"<Ÿò™¹õ¢Np%™µfm`Ñ™…rEO›u^¤“eg2QKplè¦1a½°©@¤ÚK•NÚü˜^O[ŒØPϲÒ|i ÉÈWa; É(iNès'8­ œ=ñú8ìWaޱ¥³Ú#žD8Éù#Æ.ùŽ0[³¤Ã +ð+òÃÙô~éð†pèm^ñ eGè/ð+Âe_VçýµŸé„4êQE4à?a^¦–“·?Î àËÿ‰ÿmÏJVd9ÿm~xèiÏÀ| ˾4´àû¦+0½V–z©£fÿúÎþ:hþá~ÏŒ~‹›m6”ÐHÛxÑ=dÀÈëo^ì×`îpÁɌާ©H;Œ A¥Ïe^ãr*ÊUÆÙa{ìÅü·!=Ÿvy‘è’“†ñ ŸtúWÛcîw‡räš};Ì£˜¨øEMbcʼœ–®Û/<¯änÊ{\(8Ê>UÏNU,%mŸ«Îéë9‡ÝH<ð¥)j*ZŠ”£R¥I;I9Ïì ”¤’ ‚ #8 zŒKy]•åÀJÖÔÈ·‘Eþ&Ê+Ò U´Ã^Z4Sçå]Bÿá-+Içã]‘ÑCÒbWÊlšëWk˜q…úƒÍªÉûíÛHûÔ„e–œne•äu Üa3®ç'û©RûP¼¢N†Æøý&ß%0¼²Ëzí$}Úî¬~‘iZ“nTi)ü'Âw'2Cñ£óEZä#Ùñ…­£¡íOŒ ¢Òm+€„ó2Ou& Ì»šŒ þ¾¹·×u¶ºOtáòÚüVS[ »R™+ÙgÞ1…O<*o©6» ÃyNeÉn5Õÿ&ƒ½«Œ#)>­ŸÂøVTZ•e*h«eŸÑÃÜkÒ<Ò~í{ Ã™Aõ&Ðuªm³î 0ìóìè™cµ0¬¢¤kO¬vÂrêÓ­6Ðû¤ÿIùÂg,0y¯ՅWuD&v]^‘DÔ®âa¼¹'-˜‹}+ñ"ò•ŠzÞ}5í¤uÒ• —m¸ 9’IÝ ò…_A+'Õ\Ãy0¼µ<­U%e5=ª®èvriÿLû«æ*4ü#6·6ûíz7oì-IÜD#+e ·iüÄ/‚cô¶Pÿ쫱–[Ê?Iàç2Њ`qÓ³¿ðÇBUùãôäáÖ»=JÕ òõénŸõåÓÓýÏücôôÇ$ÿÜ?–åÚa=+*Yþ‘¦§F…$uÍ/ÏŸªü'óBòæQV‡¬}”§ßXVSžQª¦NÓg•òŠtM/­(;Ó Ê¹A:&T:‘ù`e9á¢ac©?–RИ]væð…e)åkL,þý%=fÏ]6q|!s¹®êÏÞ;„)kYª–¥ªQ'ÿªÿÿÄa   !1AQ"2aq‘¡±#RÁÑð3Bbr‚’¢²á$4CSñ@PcÒ %0Tsƒ“³Â£Óâ5DEd„”U`tÃò•ÄÔÿÚ?þ¬·Zi%N8†Ò1*ZÒ:I CšS`4¥!V“7“ºXêRPR®¢az}£iÖ]›Ö%®ZÒY êÖ<…³ ùUÐÕ*듳2ÿ9Ù¥'í0Ûö” VVëò'9ÿù~o â”M 4Ëc½¹‘ÚK8uÒÒ­u!mÛö9IÈü#*Ûè;Gl”º9ÿçÖ7ÿääúÖ•:/IÏÉM§*ËM2øímj‹Éô‡hŠã·ûGË&–?¢6-)w\C•y!(RÓ~¼Ê=áS·ÿñ{Híyí\ÌÒØ•®+’Ö sòÁð;0†4ŠÑž´€LìÑco"÷UB»?ªë2z=55u)uÇa%UvaThR‡••îJq¼DY ¶ÜŠ[RCÊÀÞ;êFß\p9UŒe˜O52ñîƒ$Ù™ðõÁ³d«K‰ÇŠG¶ —&r܆ûsÇßvRY£VSC•R”¤Ó¨Œ!ÆšX¨S!¬s v ÇmNh¡ä”­·BÒj‡œm@ï 
JöDÞœi—$&¤m»U§‘ÉiSÓ5€·0§PÃå%]æ,Ÿé¦²+[VЬ‹Y r—1*e×Jü—¤ÔÃnmJÏXÐ,6›Í¹dðwlëe²»¬ßrSˆB ˲“ 8‰R”ÔÊZiD—y_ÙßÒ*Ï6††¥ä…@ ºÖò"Ìò7£ªòXtáÖ¦ÚMJž/ ÙC)q$«³¨–Ï𠺻ð±%4¡ï‚mI„ÊÉp7§5Λ‰×2+*1ÚIÏäÒª¥bÁs„K¢éåš&õQÆÊœpšFgx¢ýÏ|§òõ˜SK;;Ƕ5k½v¦°ëk÷<Þâ³Üˆ´ƒÛF"4•ò‹=M£§1Ž®øÐ òuåD,Û–g²¿ö‚Ыî¬ãÊ)$¹_™{¸áäÛFì½òÍlY6%E›gµ4P‚K 2î QÑÄW+ä¨ÿgHÙÅIh;‹G)ZôŽÆ¢È\Ðg‚Ù 1vK†>$’v~¥P3ôPbÄÕð«€ ±8ÖºÊ ¼¤8¤j’ëkÕ¨¢¹Å#ž´‹Ó™µKÎϱ,Ò˜_•l6šS(>QÚ*FÊÅñQŽTßrœÝñxBujER W»ùDÂé€ötÂÍ=ñ‹UJ5÷ÛÒ›ø=÷0Y÷ß<Þ™ØOÙ®èM®ô¡µ¿÷\uk5ôš?lGfçäü¡ÛÖ}¡8g¦‘)0óï*¥u´ ¯%röýý#¥Õ1¡$Vêž'>K{ȯ|Ièf‹Ly6sN>—nÝZî&È ó·ª8º´ã]µ¥ÞzÒ,Dº™ ºqwáüáo•³,ÂsQïi ‰¥ûé9ÞNómΆ)ï¶.*VÁ´cQ ¼Š”šþp¤)Y÷ÓÛW-´ß›j^C vY#wWg<Ú@¼®Nòq¦9vÒ<’ÛÚ#¥Òš=`ËI±dÛZ8νM¹ÆSízAð’üémÕ¬mM#Éä„­å“JÚ”y¹†¾|kZR\Mu²Ê¥ä’2oöwôŒS‰ÐuêùD¼3c{éãòs‰ø@²®Âî_US-z¼Z¨«ÿ6í픋h½z˜wvf=÷ÃK½3*= Þ΀¿.ÍñcÀ&Š&¹ô¯Œk4¯~=Ñ}~8ç¸)î:÷”Nðo¡Ëá`ÐÄÙUÚÐûógJ§576˜þŽZ#`[µ­©Õ¹7hÈ¿¨a–Ô¦%,AÅEº¯‚[+VÊccÉØY-ù+-enJÌÞ–yËÏ¥A!@ÑTYNwrÝýåÂÅUµ¡Ž0Ð%Ñ2!aB¤šRq;#Iô_ÉE‹ %ÙŸòŽoHR‡Tëró×Qjµƒ³]—ø1¤ ®®a7¾MkU²”"Št¡tíŠt˜•Ò7¥æÉ—S­ezéÏ ãÝÝN¸ü½Ä6R¤r:’1 ¥r®1ðÄÑÉŠôíjNÝÑí…fÉø¯g¶>þíO¶jϧ6HëØrßœ­&»°‰Í*˜J(¦H=Ù3oÚNjYd¼ê•p!4**Üý"¢4Õ™Ð=%Ÿ]¼Î‘JI-ÍLóVe Ô’¼saòÛ«Òûh-à8Õ"*zÖò‡k[­5>ì«Ì¾S9j.üál·sŒ £Z’š*†ªg[6SvÄŠäÜUÎ0q ¥@ZAðÚž1¬LyM•òygé^„鎹«a:嘞&¬ëBZm$Ê>òÖŠ)·³®7 VÚÂT Ù{EV¬ëó“2ÒlÜJ·«—o¡4‹%RÁIJ¦«ù •K2úà–’/Ã1ZÓ£mCЀ‘šq=ù‚ PäPN°Õ#÷ñh´Æ#VžÁ¶ÎZ¶m±+.Û®Yó:Ó, .¾1μZÒ„çJáŒi7”kKI|ŸZV»žMŸ–bzR’Ó“Rï%ÛB¥Ú]¤ŠlBØCÛÐcÈs,› M‹šç%—BQt¤•Ö• ¯'eGö—Å›àÖŒë`O9dOʲ ÃŽD³Òî¡+[m¨$7ÂWt¸Rž9Å¥Ô¡W#nÃÆ,¬›hŸ“œK.öf´ï€ºíÙ‡éQPþpçÆaƒƒž4‰ÊV»âÄtZ>K¤ZqËðhSNšŽèò›º9ÿ ¿g鳜³lëI#ShHZì¥ÂÛŽ–gšñ«¤Ö˜Ž¸Ô¢B×´¥Òª¥©Â„ý-ÞøF6xA5G&ýKw}J©ÕZóD¾Ü7ú*!ÀaJ|ÝqhÝFúJŠkU{óS¾4z^aŸ#VclU¹¤è¿ ~õn}èò, "£<׆=Gû?LZ¤’çS-””™ â€RëË–»B¢9AÐS²'µ²ÚA;Âï6~*{.êwù»ÕÃѬ%ͳ¦Äo|8Π#„p'äøÛ©<IJðè†Tò0Ð0ÜU~‡¶.ï0âNèq¸´›ÀŠF²—Ïò‹ À<•H¬Š'ü˜­z©EÀ!u-þ5gÚ²+´ã ˜Õ$É>ÙE>SËh%ó³Ì†ÕA™¿€+³¦ìm/·,©×Ñ11-6Cô!a9a}›Vc-Qa±b¥!N».ìÖ]KéûËm)ìT0¶Æ~Ù 9–gt#ZýÇÙ y'¬>‰‡Eq5=~È´\I­Êd}b4ˆ%u§¿lh¦’Ø·’yF™µ¤5Ø|m®a ¸™º|EÇ‹j+Ë G„)¶Æ>PPÈùº†\¯šOtZÁºP´9²$º3bi7”EÙöÜ¢g$›³ÔトŽòSŠ’ÎRnüèÒ]ÑÝò‹a"óDƒs¾ërkm]÷ ®¬O4X) ³Y+Î~3ýŸý%´†sGQaÍÉ|jxÊ¡¡ «•è<Ñ¥ÞWtŸÊ4›!j"VU©3ÇL°ÅÃó€©îdjYŒÀZ…iÅéËá©?+ߦžØT0™äŒÔ;ý‘ðš}ÿ”Á±B2‡Æ0=/¶‘¶<€Y†Ðò‚ôàJÒÜ„¥]X"„×h­êuFœÙoÊ>ŒÚúâ\q‚Ú%n•JW:I1aìÖ¹”çã?Ùþ\4M$´4MJ)•˜vfIÌ MכЭ*wìm)ŸÑõ:$ÌÖ”XV’Q£ò”×Ù«ÂÐGјQK.}U(á†ófÏ !+*çÀõä:Ì7i9J×E¨±ò O“ò£á3éxLJÓ÷ì…ZÄ|¿EªExÙtŧh…×ã@ô­û+ÊÁ‰d8&kS{4âM@˧†&<¦#Smè¬ýàÛrë(uÕ­(BOÒQýZóÆ(*ϪT…§\å…¥i „œ‚¤žßìý&’nuíK‰7o2åM8¨Lœêœ"¤|”ä*NÁÒKŸ²d4V]w¤›ó*­ Á´ïè­y¡3á™6¥3zâ«—E{„[)¡U_²k£øÊ§e+ ·µ‹ü5k¥Y/¹^Șôû²iKÇÙ ´ÕZ^ñ‰ëAÊV±äfqþ-hzo¯„ 4¯Ëôk—^\ñåÍäKèÜ”ÓÒ‹˜b^bûÒ¨ZP´¦ƒaPIÝD¨žhòx´9£knYrˆR–RÂÖ—Dæ´)@פÿgÚ­kf,]¥åt|>‘ÓU( ™Çô©c]nÙNÖQ-/qå^â·bxßVôNMPÑ*¯l*e^—Œ76½ýð&–r=°ÔÝ3T&dì0¹“¾ò‚ù¢Ñ˜RR„׌µ]HΪ­)üü#ɳóVg” yJh.ÛeiQ8Ýw‰Jj¡z£0ùT.«dù=œyx¸·šb](JZÞ~º¤Ýl)I½NR€Jp¼F¡M†¬ VÓK©Ê”"—S»û=ð‚ä­ôޣ䧙Z—h|cúLÙ,‹»UÁG\˜fY2^{âц"÷¤hò”"mjKêAÁiå Ýyv&U!+)΢3†Ü0âÌ-ÍY!xç·¼T6Ôå±h0ÌŒ³³j—XêAqiFIôõßþ ý9ß’3BòS§häXÏøò_ÿÕ^G¼¡Z_ü$5ôž’ÿ°˜±ÿ£¥1ÿµ­©Y_økwþ†²4@,/%š?¤–£sï:„³¯˜]uˆkÒà´àîeÉnú¹£ü»–±š’µei5gÚ¿¯B7“¿T¯mƒúggÚ3rH‘t<É5®­äw8Út!AË=uœŽzøuÒ,4ÜDÀùÍø+û=À ¯Ê(RyÔFwz#Ê>‡iVž[Y!µ|Ô¼ÊýæP›Óâ\7(wÄ·ô;´Ôþ¶ÐÒdHȲ6 Óƒë%KGLh¯mÑå'x2­‰öU5iùÛ§ÆW¬Bm³†Øè—»ÿdL-ro7Ð_½t%Õ+rQ{ð‚!“^CËGÑ_†°ØG¦ßüÄŠ<¹Û“r:;+bIºPå¶öªa)4&SÒ;>©7¾lNÙ&KF4}¹Pë-6ÅÅë›^ ÝE ö9âÅ_›•m§µëo–­:ò=F,…k¬¹"H"ƒ"¿Ñ¬Xê„ýÙüCû<îè=ðþ™hìªmWW;sàÇÝ–žp¥Ê²û?€›—»é2J¾JŒ[_Ò7Gššàö=‘7h ÷‹ÿ÷!¡ùw}å\oGÁ<öÔÂ|eÄ-óõsVT¤º·'Hõí6±ßygj—¾ |Õ%„“ ò½kðŠÙú*ÃýDË‡îƒ ò¡m(ѽ²¤X³k9ÜܺàyNÒVùrvXÿèfáhÆ—Zv†’Z²“so‘Á~-“Z{ôC~Wœà²’–®Yöƒiø»³Ž¢½´§tHyW°Ù<"W@ž—Võ[+ü2Æ<™é®“h̼ëÆQL+VódÞº½ÜÿV£ž4in)Ù ¿’“ˆÁaYgáQÏýžºjÜ© »Zý¨öii—ÝÒ†ReVÌûÎ]]F©ç5-+ŠO)Î-ÞRsP ÆÕ€Xãúb á·ùŒbÙ³Œš„Ã.+T~])Ø9GìˆjÇq "`¼óÊ_"–¢TOcD­H“º—\j^aJS|°°¡O·Jõ"m`–€sie8n4¡†Ño¨^-³M¦ã|ûúîÄÛkƒ ™g^O$¤Ÿ½vñiYL3h*`®åÀo%B£×Jú`k H\“ޱ(4©ºœÌ…Fò6Óq‡dĤ¿MŸ¨{ª˜ò2ƒg[6xP:‰°œpIµ¥Ý¹V±£nVÓµ[o'fiÇýžæJ?$!À¯¬šÓ‡Œi Öfg\˜¾2ýðTÛ„ðíupI#Í㈳´ìÅž;ì£}åFÈ™“-p¶T¢95ü©ßIÑÛRÓuÔ)òŽ4‚\¿EÒ¢‰ 
MÑUʦœEcE¬“nÎMO85L¸¢Aô‰Þ9TÃvø‘³„ªCrRÜKÓO`]Ò:=°¹Æ%X»æÊýF\õ"qk7ÁÏÂH^±tFÍóžá\H݇]b×™“y-)Ú>]„¬0æ'ª$eߟ[)|ªU‰¾«”Èb­ø0ß„[:A(©iÔ0Àxݧaèï+2³6åD¼ïì˼Ÿ9õ¾Ÿ®”öă ÊÛSINæÕA¼$¤ú¶ÿg½ñO}žg¢:;3(µ»g¶Éo•úäϰzŒZ&…É©h\úR¦ùtZÕO² z¡:)òÔçüßZ#LmIgÝà²naóPâGzh­)cI6ÜÒTÂݦ¬j\zöÅbËk JÓ+ØÊHM:ù×#î9‡@¢f×·-ë »ŽIƵ&]еT&D- :/HàvŠ-e¢Í³} |Û‹˜—O]]˜Aéë‹eSV=Œóö•¦‡¦f1`7Tt(Ÿ¬S Lξ„:¥¸·]äÞ¼šýª×Hò ½g6R¤­Ä_Xº®Núë¬Kx»wðŸìùÕ]–˜#=Q)YSM¶ðkYÈ]êWþ‘hãÓ*V£.캴ýQ"†ýqÌqSõÈ…–›Mä¼)>Ú÷CIóxˆl&%æ™D½ô¥ Uë—Ü£&öï;p÷FÛ2V­±+f6…ê[åݽAÝT.]÷§ä¥Ú› OÌmÃøQÕKå&¤Û³Ùqô§W/qi»Z+v¤sÄÉ-[RË8žpª¤å–{iýŸj=vÌ™˜VÚ×›·ÝJ—,|up£Æƒ¼C@[“¤ßë“>ˆJ“÷”{âÖ¶'Ò/Ê>¥£xaÔw- ð‹UÛFÚSNOÍkœÒ””Ò%X8Ü/-F.‘9½°ÜÛ–[“.:^gVnáÆ÷Û]¦›:~Vi„9æß¾²’wçSÐ1‹|[6„íALÌ­\V]Ƈº,wR—¦äëÆAK·h~ŠñËÐËû3Ig52º„ž2ÈRþˆä޳5Ѿ4·M›ÐWlËYÙhÝÎŽ¶š}¥ƒØ#M´½ý1·§­©¾/ɬi-ôôÖËŒ³y ¾®þ1$â[hÜHÄîÝzᔠɧ֪]gH§¸Ù¾%lÆ)W MÓ퉻‰AhܿѲ»ÅGy‰Æ[mº¦”÷뉪Ç÷¤å\LJ¸†z‘iK ”¦0‡x$—Î?è¨Þ |yòdÌÊXz,û×gf$ï2…µ1q\Ü#UÁ’¯˜§’¾hœµ™¤-†­3N 'ršpÜR‰ ¥oqŠNÙnº–PV¼‡iæñ¦Z@%XqçÖÛ`¨•wp©4æ*zC'>‰Y]Z«å€ófïÞ¡ê¬Nq[Ö·Š7þY÷l†”KW\ {ÒL3Æ4ĹS¯4Â1Bq;QËêKz®]v}ê^‰ÙëjZ~Ò’J‘5hzjMeú ÔÙ¬i ˜›fU<ñb©Kâ«dA"%*dõfY!yS §› S DÝœ,§”57Ùô®øPE•"Íâ»ü^ƒÙ•bÔΟ&$TÔ­—+2¥–ù{iÙRz«á3G„¼¬7åÑ5h¾¦àJ[åS,7oê‰ÁªE‚Ýá®Zb9÷øÒ*%(»ZÒa5¼ªaL2¯51®ìކeš“m7PJn§¾B-w‰Ùœýýó'öŠ…–ä¹H£s\~ooUbÛ±ÚµZP ºâ~-Cêï&^Ræôbq¶Ö¥YÄaXqÅK¯ø¬–ÂþºBÆå ËÍL´Ûì8—Zu!hZ R¤Þ½Çý‚H“ÆÍœ•‰ÙÌšx¡% ââ°Ï’7ôîí‹Fq2’:õ®âùëꦓ3šC6ÚCŠD¤§!@Ó…t§®”ƼH5p´kz¨žìû£Ê:¥ææêT‡9 º®7U*:ÄY󉫋UÕužñQßo•$-æ«8µ§¦&%pºkCAëÏ"Ìh„Ÿ›J)½L7ááœOÖq äãÓº‘h¼©) íÖîð†}Ñf°CIvõV¯ŒÌSÛÕ€[î­„'rÀ Ó³>¨²n¶ÈJP*{±¥{·ÀSV[k` 6–œÊ?[)ø²Rª)‰IRrùÙC“¯n†ÆêñvóÅ®âõä­ çTÐܽ÷ƃ-M3h0¼LÅòŸ™¾£ªÖtxœ9ð=™÷E¿g&iï0ÜǤŸn]ñä·ÊTʼn=þMé §€¨ù¹‡/-Rî«å€¿6¯Þ$ÐŒN!AiRV”­$)*IP5 AÚþÀ™s÷c¥^¡ë‰fñÖ‡'§é}ô²Ÿžr³Í>†Ò^+ÀâTB…zˆ¯tim²äÐàéUÄóVú•ñÓª¾hºÕÙŸÖäñN¢—+^ˆym…6ógŒõÖø%Īõp4‡~PáÒøl£×_œuMëkJR?1f S]½=±£²\>ÖbP€nâ Jpë­6wÂdÕ*h¯¤“–Y™—Ò±—ecI¤Š~-;÷dX³œØœ–*)\Øý]4WÒ™Òê~¹LJÍ.wÏ(ê¾dJ%3k϶4‚Äje<*M‰ÏE §ï÷£É”Ó|讑½ªu¢!0íåR§âœu!HK^Š–°ÈÑ(þ¼êî&»N§ò†˜*ã/,éµ];‡éqÔ¶*sØ7ÄË  Ì:ª ü¯ý#ÝI¥zéŽ Â‚Fä×ÙNøšzeëÊäﯹ…Lrað‰ç[J5…\Mô>Wº4ÛW1d¾ŽSÒßh¬zÈìÀZa78ÙG ]Ÿk|i íN;ù« :‹BLóòvSÃdXkXZÓJZ§/5kL"tÕºnõÄöªj{‚°”…%²â—K¦íêfªW ùáæ[×”¾6êV-+;0ë‹B^ÎKÙ8¬\ÌÓ³>ªÅœÐT­áÊìëÆ4[ý2f²•@'hôA[.rÝ=ŠË²,¬Ö•AtW²-YmzoŠ)8ävtiÓÓü¥”RSt7Ë#gPÄõVî´Ñ³«G6(ùqWeÍånø¿ú—|!wùc×áX¶lå! 
Ȭ¶öôñO}#Èÿ”!¤¦Ãµ&Qð¤²/K«Î<Òymdµ68èH*V®ùä·‡õÂêÚôBR§—}cˆžH÷ïß—éR‚AQÈC®ÕEk4®ü€Ýu¦æYÎ$åõz) üWB>ô4óΫ_4²•t•~ÅDM?Ï3œ{Žªo¡=ÀÝY=rBujM¿>áSÝRë 1Ê-[ój`’yêŸlX®¨%2GöƒZ#}1år;Uõ“zh¯ŠÁUÆ•ª†U'n"°’Û¨¾O¹‘f}Ñk»Á-¥Mi-r€îõæÛ YjõÏ!Á\ê’|1h©µ¡-$ÕÇ9 ¡=b‘(.É5lÐTÖ|ÝPÊXBÁ.Rå0 ¦GùÆ…Ùœ F‹vT$bi•r®'m0è€.Öð¦{Ÿç nñ¢–­§´øØáÐ"Д dÒ¾øî§„O“-¤ &ªÝÑSÕš ›Åî.ú+Ù <µ¹­O›G¤¨÷D‹åÞQ…4ÛþlâÞÿËò‰›-û"y6fe JÑ©}-- I¼•¥zÄÝRT*’ ŠƒMü© Cu¶PÔú…Ö¦h[×nŠÂÎZÄ”× Èå/úÛŠ Ã|!Ï6ؘÓ4úFÑ Ìì'ò;ò€eÁ¶kˆvgr >ù¢>ô6Ë„ëæT^r{òïª( V;¨}©•£”i×_™š®GÆ'R5€‹›ÿ#t[èSÖLÚV8ÎrG¯!×4òJPç¶•î‡Kó…4MÿD©#¿.øœP‘˜FW\ªÃ·#•wåÈ"]Æ’©•QÀ’+ J†å®©T4:ì¦Êí±…¡nOÍÍ4•¶šu—.+ü³â1艛 .ËQ ¢ÿB}xF›hËÖ|쵪Ã'RÑã¡HOBB«N¨—uO&ò@#¥#¸{¡ÐÛ ’i4 8ݳ/lh´»ŽPƒ\€­1ç‡%†íÔçßžè,q¯Ð:7gÔ¶kA†îx·œúET `ª…@;)ÍÏX–YT¾øxÒt¶ÕÕà®ß ˆaÒŽA¯¿<2é‡5fªõ+ÙVzzUùY•J-¾V­.}„šõV4OŒÛH³­uQä–Ý7ŠÀܲJÒ6Þq°¢T’B’qbõ•·â‘Aúf¦ȺUE+náU|¨ ýg>•Íú(K•ûwn}èÑY9»Fw†ÎM:ò}%ÞÊ¡î… ‰¼¬¿ùc/¥®Iî>ÈšœoÓì >x±2Ù¿rÿšßùgÝòÁ–è>táCQNzò{âZÏ]œh’)¾¹óáS TêíZµyC<è)]ê¥s„Ùn۲߆è£ûÜÃh$ŽãM¦PeœvyN€¼Œh Ûž†´kˆ¤šS {7öAÑ{¬¥©pYRÍVSJîØqê†4J~åÓ;66иœiÎ [:2òKkš×5¶öÞœ¶ÃÚ-gNMêâÑJ®Óéœ=ó†Â¦tšYŠ^eº^#f4é=@ÆÊ%†èªŠ+ðÖ 1¦jÙM”5¬”*©é÷]qo­E¾7oþQk:ãvú‚×jñV)ÆŸ(Šå²°×3tù¥nÑXmÖÿ|ªuà 2ún«>lGh¨†&¿ßß(¶”ÞR¨ô'ÀC/j~1}4>¸en0­j¦œùµ?†¾1¡šZ§)#hqNU7Nû©¡Û>úÿXJBzNgôºâZB–£@7Ç”Ï(ÆÌkƒÉ8•O«öÀÕýø³¬KFØ´¾¶ Êý­*ðR¢E©vÛÕ2€Û›€§~]ñ8ðm«ª4;…UøkSA×u(U\Üj‘ö–žøRË"´.œ {—C pœ 8¿ÞáßáXÒN,¶¹¼\ìñ¤JÌë–”)UYÆèã‰êçÎ,”35;Áʃ{¯›¸E)-.Ø]ѬØßž#Û,¤·ÆN>7‡y޳TœNôïéë€Â«·U€¡ìÏ.|á©tƒˆÇu ð‡¥ç(×ùÆ–èúæ˜3Ì4´ŸI çC·g=9£FÙÿO¹y!N6BVœ0ë®8†, fešWœÖ„1DøxCÁ!WÊðÇ:“V>õ‡šWŸ8Ò"8*œ<”š’On˜3ëNnqg͹ÈPãW©5Wh„ÆgÆW&ŠèRO†^Hó)ZWóïâAh!Ÿy´t­>£ Úòêfë“’¡[¸K'Át‹:mÿó+éy´ÔRa—µjÊÕè)%.ËP û±.µ.[„·æÝé ñ¤h~“ªÔa,Nѹœ’œúŠ“Tn5­)A ©F ’wRtå<œÂy!ú]eò‰êR ç#`Ù]›5lOü-i¸µÿv®0ìZ’]”:K× û±î‡Ü­UáóGý@“‹¨yPCÛÆÿœK[³ O©‹B}‰´#•CAùõ@QRo&ŠÒB¼*aå×~VàAïF–ÍN¡¢K¾ï4O|YV{Ò2jµ]@Y-Ö„‚ÃCž8F)×f ˆq*¸ÉN*!9 ûbÈšÔÓX+ïÖbÏŸ+N# úÄ!÷×ÈJ‰Ï«fpy"¥~‘ˆÏ8”+W/r­+N¨vY/&ð]Ѽe­žU,ëi@Ô£—RM1˳7&“oZ3É*XEo…ÅÏ!P5ØbÅRÈBš¶5Š>ÌN[„M¸¥¦q8§â è0ÓŠ)£¼U hò‘mê$‹ ¬%é‡5-"ŠKWH M+ÇQ ŠâªÅšÙBn kO΢{ܺ#„cv¸ô¢FZÖœfól®BZ•¸´TŽd^.ýØ4êŸó qXãÆ M7bA=5öÃZÿµë3â«iÙZ“ßNxkB¥Š®‰Izr‡4;ƒc(¹‰n…{ 5o[6ÁjÒPµ%SÊ™ìÿÛUí‹ u½&-3$ân=ñcâ×K·×üPˆvÌ´4fi·]JÙBÈ(PR]ŸîTå:ésMNË34Ê‚ò°Ø~RO:UP¬éΖ7aÈMÔrK¦õIÛ.€2ÎëÚMj¸ìÜÒ—.ß+2”ûz„I&^JMÒ¬äP^¯Ø½NºCÄ~è×îþ*E©jËH Ó†Ï ÿ´E±jZ³T—°%ƒ yè )= Cš5¥‹·Ó0Ä è =^ÞxNM©Ç]rÒ{X¯þR[>Ò"[@mªì½¤ä²7©%Ìz;¡ºFÛ·åæ%ßé¸iõ½qoØ–ûoŸ²_KDRýVÐ~BÔs­2¨btORŒG¥á‰.åJƒ‰¨­)3#6ü´ÃŒ ‡¡Å)§i®ÊWš4BVn×´u3¶Ç+¬k²í}‘ehü» T$ˆÄß|a›2]¯Ý§vÌz³‡d2¢q¦^û!kC!Ð8x§Õ)·:D¤ÄšZ)}û«­@ZÇrO®&g½)4MK‚ƒ/W:^ Ë¢,½™”žž-­M8¯7Q}K©Ä—Ô(j¸•i2ò÷ã¤)VãJ)Ù\-´8šÖèæê¯·tZKC7Š.Ñ¢pmãS:F˜é#ÖÅ©16ª&NYQ¢"æÙ Ç¯Õ½M‘d™ëZkQ,¸?ñ•H¨^F¾» Ci7žým:ƒnËÔµËd5dË© ËÈ<ÜÙeØì6m ¼()—YõíÝèO©Ïwó‰6«èíÜ®ñQßeµòÙÑjX“M»u5Z²¥zÍ„XÓSv¿/<ÊDªmEn‰¯ œÑÖY3~”èü”ÚŠmH½¬• l(#X•Ÿá)!6$œ™Ñ™Ñ&éR¤5¡ã„Ÿâ"ín«x4 BJA I5 ‚6ƒ‘þ¯lZ(‘”} KZç È5‘Pô¾Šj¿›aiÚšchÌKYa餿ñz«¿]Jº–¿â”E‡£¿ÙSÌ©DÎ|â?nýè“™Krò-©ZÅîW´ñ{âз–óꕳ•¬u< *€Ÿ¬àJUõU~‰¹0÷ Ÿ{Z¢GŸ´¯4æe»íßz;'$²CIÇ3@sÇßlj%Ñ‚QFª``¦ÓŸ4%×…G0>È””–¯œH‘]Ù¼ÐCöK3M\^¡JÜY9t”}Ô‹KCX\©nÎU®¯ œâƒú ©½…oh£ÓZò¶eQ¸âEä(aB w®×U9ÀÛä•É…Û-¢aPº”’G½{©L·Ä“-†®« ãNnœ£nÏ_¿d<…”‡YHö4¾mÉ~ t”\åR¼]ÙgÕX²\˜œ’P©aTÀ€wíâ$Pá`Õ9cÆÃnÚÒ,ÈãÓÍG= jTp ¯=@>>ý6Ê8¸l€ú+#I_vmN]@:æ UáÅÄÔœûM+LâËòkjNÎÍ"ÐZÚ–YÃŒ•V‡u QI§¤Xz'' €†%ÐÒkËÀ’Fúvû!2ˆ–¥Ú{1ÛïHžp1Ÿ¿`0™„ŽI ç&•í¤5ÇUõ¶hq¨8ãL÷Û úIߘñ‚ð¥QvîU*vãØ ¬8½ZBon¨ê wÅ»(Žªø‘ÝÚ0ï$ZLíƒk±cLÌ—,ëmW˜/Þm-LÿµÑÐ’ÒZµ„+æÅ£(‰¶.%N~éD¤W¡Dß;h)æL”ÏnWƒ‰(ÙŽ)%9qIœÿÕŸp„½×.alüå±8ÆŠÙjS“3½pêîÙÿÅSŽ\hº+Öÿw:?cè5„¥HK´‰…Ö]u:ÅÍŸšEí^Ê^»–;"eÔN¸¯4ÿúEUÝA3hI;!f²Ú¸AÝBGJº;cG´[V”LLÞ+–9¶Öœ®šŸdÄ‚š•h6Ÿ"è Mvì® °€êÔQC\ER6Ói©þÖŽÿ\-·T*(­œ¤úÈ„%@TÓínÿ}±-Äù»=°ÛéÃ)M'l25®ÿÎ4þÁU§£6„šÃ_«‹Æe)EáCöò½È ƘFX†ÇrJh4¶Î ?‚JŽ«"hššüÎXÎìYN¿t­Ü~–Úl'§ßýN½J>¬bißEu¦Ã_]"Õ’øH„)!JúI›ªM;r‰W[°Ø …9^EA¨F£¾¢}ª²¦™d6á¶É9áѾ„Œy¨y·ûóų:Ì‹_pƸTåžgvØfRiçøBª/s¤áŽûôC2Z\Z€Uv§ù¡o÷F¹¾ýÑ0»ªP¯51§OåZì‹Bm£ƒ«¡ÄÐ…ÏZ ¢ÑÒÙ)EܽUnl-Òãp({Úe-$µ!: 
çÅñÐ+ÿ‰Åë‰]:šVvs€óºØÿ¼zâÌÓ†¦]ÕÍʪ_*Tƒv–Ô°ž’i¾%-YæµòêJ—]‹³ˆçÈÅ£g·j·t.뾯Œ?%0ʃ“*/¸¬–Ñ ™_£ˆû•%ú\Þ‘ÙH³ç&C¶­—ûKD- = ZRۿ𖸶Ы2Ójyuµâý0»Õ™ú Ä£âf]·­àqËH8nþ¦r‚ºfbИK®.õÔ´Ïpœ(×¥ó¾Šj¿›#g2ï ÒcöºÈ•Z^ ¡GÐUÕáK¸Æ™Ïë–¹$¯Šß,c‡u;+Kk·fÊMÍ;FоJhU^¤èщg-ûc‡ÎL¤ŸÞ¤¶¿3Ì8´8Tñ/e {Î!¤€–pY”í¥îªÂµ­L¶êÓÅk$”Ó¦„åÚ““W%šãm¹¸óòD&ÙsãœS|ÁUÛ– ÝŽÞØoGQçP8×o…`hÛw(™•פû9fÚ’®PQmúI)ÿL32¶—qð¤+uժЌ¹á™¬*UïÛXwRü³¬:o¦cI#»ÕÌj XLÚ2Í»fÎk{U.MÕKW•x(„ŠDš(z1 Q¨M5ÙÍÝ—¬6!§n\+ù=~ªãׯ]瑉EÚ¶ª±½.×)|œG2¨OPZRÍÊÀ‡TœR2îð‹RÕjU%Á]¸÷V‘4—æ¦LÛÕÆ·Y¨4ÝŠMÓrÊ-×Wׇó‡Xð½M»ç…¬#Ytü_+1üú£ItÕ‹!o´—ìÁ­HRÖO@‰«kHmylÄßþì((oĶH÷Ù H(í<[?<•ûbQÙ‡›•h¼Î±¾Zo#ºÐõ R/]y…sq¿á ¡!ëÓ¿ß—œ™ÑéÝcEÇd•Jñ°8ú5¯>"‘aÚÌZ¡ùW£} hG2ÂOjDOʇStõ¤x‘]«7¢Öܵ¯,úã?,+vcéTö˜´m‹>ÝÑ„å&ZuqÓTŸ²°•îù1¢3Èš³Ê/Um.´ÄqUÓ² öÿSW$ÂÒNQ§V•ÁgÙ—õ&Ô™àÈ¥MùmüZÝ5wO4:䵉d´¤¶Ä³U+uÐ »¢ÔµU0äÛŠU\s‘ž=t ë"4Ñ禟³“Çÿk@RMÞú/ê^Ý!e)d-H£ŽÒöG-ÄgIwV†YMK¤ ‡o„ƒŽ5 Ÿ«Z ‘i¨6ÓNÞ[´JqRº6‡6Êæ"Ű۳[¾àÌ.„ƒBz+—\jñÃ|¢^éøÃC>yã„6¤áÏ_Ä)Äþõ¾ßʱ7/*øªYHVt¥ æ­)’–ƒ«6]–Îõöðê*J»NŒa©æ‚húÂ:WêI‡$ ¤û(¾+y*#˜r»]!©æÚMÀç¸ ŠÆ¹žL;9²î„Ôe³.“Hru Kjê•+u}xˆ›uaz–8Î’6’­žÊÒ%f™³Ùm¤¨/–º+]ô‡-tT^SÊ)ãSì×dL[n(ÑŽ7Ú¡hÚ+)Zï?RHÄmÞ@Hí‰)¨ßÇ›gN=Ðm¦8£n"&Šœ}Y&î*¡Å#,{2íÝClºÄÄÔ¤¹囨Eižf÷$SœÄûršE6›Måð×?gš_µíä"õÌþPÃi\¥ô¹Wwb;Í|4£ ¥¦ßª]>˜öÁ!o^_'¿² òðïߺ°•4Yº¤‡Nâ=±¡ö”å‡h±#4–¦ÿg*7‚ún•]érìy·ÛoTŠëµ§M)×Nx¶åÕ¯¼S×îcAírý˜íŽØq cãR¥Öï}Õ&4|‹D3{Í,)çQÅúTþ¦³DÁZFg¸ÆšÏ^Óm“q\IïÍ õIß…Bú¾cL4¡™áÀ¤ÁZ:ß㤚H›%´²Úƒ³‹ä¶š«ï€QÚ¡0˜¶'þ˜%G ®}ÕþQ(K 9Äìò›EQ•k´e–œý±¡² ™E¡6jÃ5ÕÞÞwW.#„Žo\kÒUJ$Ùz¡ NW“g¿„3<1¸”î¨ÛLzsÛê¬)àúƒnÞM6M7á—~0ZS (hŸGªn¡}c~1ÄøŸ²|ãDtWnêEŸh9b°¶¦®i¡xÁ;rÄŸ¸™ŸfzynÈ­-gæÈPÏͨ€•ó]$¾-LnÊkM+„ľ\õ^ússÂþk”©”ò…â•ur¿ŸtrNE Ën ‰ƒš¨ªö”€:ÍcHôð¡ïì¥p™é‚‘D^Hfµùe!¾Ç;âÀ—˜•³P‰É…¹4¼]J¸ßs(ã ß#Çh$wÄ„ç ÑÄ;qϲ”Dºo$^”>'|Z3(@*mTm©xŒy…/RœØöDŬJÖ§0I55Ç–Ÿ–1k)i´ÊTäí§8(T eßyf5;--l3@¶0™#÷cµSæÞ8ôÂT¶Ú¸ŒU¸{ëNøJÈÏ» ÂD¶c_èÿêä÷À¾Sxrw×ÕZÃ*/óûóÂ0Î-ôZìÚMºö¦^_â\E;¾¬°äˆÑ¹µMHI,›ÊÀÞÆ¹cì5‹y M])â'3:…Is€Ûn ­M³Âv)¯K›èª‹æw‚Î…‡ŠnA¢³d! ¾„/ÒJUÚ+ýIÊS¡î"o+ïÏvè¶XáºIiN©ÂTßìçÒíÄuÒ'XB–hG*ªHë“tõ(Å÷íéÅM’¹)tò]W_ª//îÆH²™ Ûj«<ªå:ò¡ËBn ¡IØv³TO¥¢’q ©&¡Gªír÷¬Xšed” ('Pø¥iZIÄåTÓš¦Òù$¸ZLß&úUNšÐGùW+Zp¶ ÛµÈ#·/½ Ûâc>œÇ%cÙŸT5jP‚{A®'n@CSÁÄÞ@MŠ%ÞLÀóÊÔœ7žžMbmå2 ]+Å&ö‰'wvè¾—7o¡çÀrRRi™¤‚\à¡©¨Ã•@(vaM¢4¾Ê·ôRÒL䔟°ê¹ÿc5ýâ~5chRR¡LIÀÒÀÓÆÜ逦“éK‘÷ý±nék¶²µò÷¯/%S£ˆ~ì[ö”ÝšÂoO>hÛ(J”®ªT‘HÐo'ÎYˆ6•´J­'øÄ)hsSS±H*GEÕÒ*Ã|µ °ÌÓ›(”—3Lj›£›ßl7gêM+Jt{ç=s”hÞó—¶'ç’âßJJ38¤m¢¨{¢j}oZa–ÍYpñ6'¬z0‹_Gæ²ÌÈeI#:b{¼NgXLÛ6\õ”|âŠMp ò³ªè3o4*IÛ>ÑrÏR¯†ž2ëQ"©wÑ"µÇÒäóİ[ oI³ñ 6âõ7~Vêûˆi%g#¿£œ@\ž¥mk~q¥{ñ y ³$.›ÜQ¼bSÕ¶-–µ’oßœ{}ý‘#¬E«#/05G]ª¿P¯Õ=.!WÙåüØ—šC ñ©¬ä`¬{°ë¤YN¬Ù'j–j½! 
ñýIížû¢ÐR‘-U`=÷V&?Ï9ò7þY÷FžÚˆDº,Ö¦ n/–àK•‰‹6Ù›elµ3u†ÇÛ‹ÆèWq˪4>Ï_ «1Ç9#¬!樼œ'ŠsuÒ4¡É[!«J×[*V£4Mv÷û"vÛ~Õ¸úÀñâL6)0®z žƒH™”F«[­˜Ö¼>5OEö]™yíÚÒ;Ô@ï†4Š×±]ãLºë{ï•xT÷F‹éß qÑTš,ÁC¨Šã²›¢Î¶Úu!ä’yÈÐ) O'+ý9š÷Aºî8nß\êqü¼aŠTrÙžÂ.Ò­q‡5Aï9&ÜûfRe q¥`¤(@ íéß“×ì‰ÊYŒ*rÎ8ÜMÛÈ·ÊOwM Ù³ª^­™GJ÷Ý모0é‹CU/h9kZ!÷‘û<²ÒJzrRFüÇnO˺Úîë–Ñߌ *jiWJÈñ’Ið$ÅŸe.ÌÅç’;û’t<å²÷¤ÿ?~ËFeÉ„©(PMÒBà‘Þqê¬3gNω’æ± s1§¿0‰¥§_2kƒ¿hqîâýjC³_iá|þ¬Î!F£o£KÕèLXr̢Ǜy c‰]ÁwIô¶îí¡ÓáŽâRî yƱ«ý–ªÅ¾WoUcM,ã7'2Ãh¼—†@¤s|£Yr3HÍ'T[ýœ` ®z¦ðOÖ)‡¯»/Ä막P¡;Síƒ.‘¹Þ’=dFÙ«œÒ9{=­£1Z%$šaˆâŒs©CrÓ’.ðRµ Œ–9¹“%¸ÑPneíZý¡]æ†q)i ò–2§×Àá©ÔqIÈ^5+ѓ͔ c¼GU`L-n߉³úgë€Êžä>¼¤ðÄÓ ¶šù¤=S“xõçν] !Ö÷´§M{±®Ø›a°‘T¦ø®T¯YÈã–>¨•«b¨oÔ:áµÞ*ù<£{|ZSwªA«›+ÕˆÀÓ¦ËNJü&ïaGõ•«ŒÒ”^$}žxšÒÉ4*ì„»n£¢å~ÐLi•­+6C­Ùá—p«ˆX¼yÏ=›1‹Ý™Tª’¥,¨Œ¯“†t¬|#1gÚIV±II<|ÏNU'ª<¨Ê79!fZè]æåOŸ] T€¥eòs‰@‚‹ããw~ywÆ´{ÿ(—n]ÞTº¾ÚÅ !Lä5½$ÝH•Ænçî·þY÷E¯0´Y“!–е׵wAÆîf›+ò+MgîïF‡·ª•aN4”‚¡­Ú’MD֘ŷ5«—†ÄÔõ¥&ÓCŒÙ%a*O´åÐ(Î7%(¦À?WÔH'«RôZ@¯” ƒ0~UFDïþ¤ð*8FƒÁ)¶4†yéK>Ó™Õзû9¨Ç°áõ©<¢^u²qz¹„îQçÉTÞ’¡Û I:Ô¨ Ÿß¼Ñ=ýÑeL 9ðØHJª0Ýd›È5M:ûé·Î'%”ò(àǤÓ}qNp†Y:á• Ei²¤öeϲX¦£Œô… ö© wà ÓY_ŽP¤­<¥‘×ì$wÓ«8Í‹e×ÖhUF› pñB«Mª;س%·t 6¦‚ek[ÊpÃäš+îÆ™èŽY\ô¢Ûaô[Bˆ‘!h¿&ýTêœf¸œý§º,ûRNiiwºÔ“÷€5‰„SV+¸Ó>œ»âÍMæ¯,ÏåsåS œD™µSÒIƙԮ&_C«ªNÞ|ºÆpòÔª|žŒr¯NÈÕ)Ê^RpÇ”“ày¢ú‘ˆJR>š|J¢Ò´ÒƸ^ÅYr‰Ï!ã žXPZÉRÏI§X®1¤“Vަ»«Òî2*Mî’=±o5} UcxÛ<"ùMJˆ¾1÷9wư¦mJ*- ýš÷ų*Ôö‡O!TYàáö¨’N¯;Ù`¡¸ñŽãr¯Ð§å˜í­)×oYÈǻĈOâñîñ¤!d&ùäï÷Ƨ5ºÆé«ôÔ¤¡ie#¾&­v¦eìÛ"ÊeÍ9rôÀÚGì©ÆŸ²Pßþ'É© 6šlù)Vï­¾X'oIÀ÷Æ‘ªp*ÕK˜Œ)ŽyªÆˆK».ÜÜ󩾫SII,}IQ<çú+oôh'¾<•Îkôu ç ©8á|Sn“°ŸêG3HíÉe•ùyáã^ªÆ™ôôXO2‡¯)h áùCï­iéJÛ¨Ävû6sl¤i˜ä…£ÃÒ(…šAžÀzr¬Yj 5|ÞP"‡h¨Ç“ÓŒL4U‚E{Œ[Vv»ã×é/ÐKE*3’+{“u#¼Œùù«HÖÛ,Í!MN ÚP8²ÿ"¿^Ãu “º„xÒ<œÚŽÙºca/U}òÎ’R?Wâ‰Ô‚~M*6ªÊ·Š¥rWmhsØ9¹Ìi=¤êÛJûŠ¿ñ†ñÊ”÷¤Y(3"‡rα¦$?Y–qdgt`=QfÛjó`¬ÍŒÛ]~Õ5z$Ÿ´XSΦ¿žð¨ÇÕeîUIçÑ fåolÎl ÍñÏž<ÆØDÝ£'*8¨ýa߈†hÚ‹¸ØZ%v¾MREiCEW w¤ÒaÝøÅœMMI<ߟi‰Ù IDÞM]ç¡§zkª Ù"í2¥Ü;âS‘çpïð¬OXs3Rb]‚ñ¦ÂŸñ E•"´Z·C¹Ý8tóF‘hºååZš™B‚Þ>o%ÙZaNW4h¥„Üń㎤8•º¦’Úèxç£Es`cIdQ£ºWjXÍ¢ã-bÆ7°8…NG3H«Eoî¡ñ¥;á*RQ¬!W7¤w&§ºy©yz:´ƒ¹>sþ蔲æmÆuûò6LÏÄKš%ÇhRGÒ¾4SE,ùy‰ižeTÈ¢o¯/•æÊë–8Ô˜œJe™J† V[û3b4¶ÐQT¥–צ§*•ZãZÞYFUJ¯8‹1!® .0K|±•=½U‡æ™E‡Ò[åá~Ÿbõz«Iæu²µä‡UúP×›-´þ¢p£JH2stýç#ŸÙ×HÓ•X“Àgœƒóëm-¨êˆ½xÑ#?J‰;2¬HbÀ¤ZÒÍJðw9GåPÍN|Ûò´mÆäç<Û­ª':EkêÆbðIÇçë…%0Ãë‹JÏ—}¢…$V§ iNÏ·¼ŸHÚq´jÝËë4XÅ© zAf:¥Ë‡'R9’ƒN‡œ¶ã9)9þSØ‹~]ÉdÊLLT]¸žªÔlâަش½lo®Ú( vŶÓmJÖðRùO®,¹‰Ykl¸÷ÌKj¡=”‰i] ·© L¥˜?pƒ5ÿ7XP:ˆ‹ F’VµH v£1S†èjI¼Hm48 ÞýÙ`F†‰#!S™8o·#J¾dÑx\9YsŠƒÛÖÒn±yG“^l‡lYRç&-5jS‰ÕWYÑx¢ögª©»u¦ÐŒrªSë¦Íð§ •u-P‘€ÃÄÓ›‘á jÈc0ñË´ÀÑY$<íêî©õÄÅ„$'‹I@ T±SíIK–ËwÙþðŽÅH§7DM1)3¤ÚË¡™Bª—M{J¶åwtZNÙ–Ô‹¬½uµ¶<ÅöÔ+¾†—GZ«5ª%O[ˆpW•Ò”9Ôg¶§š<¨6NŸOÌ„­Æ^J5N6…¹{ˆ0¢T.òMà1Ì­§0‹ÌÙsËxàÀßëS`wÅ›¡Z[4»†]Ë=¡ž°²º …B\Vq#ä¸0°ãî—H ¥¥IÏu}¼Ð‹IÆåõȾ™\YJRQMÔÃnúg¡‚0¥2§´Wª4‚×fBUɇ•@Ï%R¯î¸ Pú]±"—g&_´Ÿ¦¦2l”þ¯ŽÅVáúªVì¢É´ß˜eÖõNJüzj ΰJWõ ¢eö˜–žPQt]¿R•òwâžìù£ÈôÝé…K…U C—3ÊéÙýEggèÓÂ%õh­í×ãvñ¥M­vDøH½LèAõƉLð”Ïk†©tÆWÚRǘ.½T¬Lµ+ZÄ£‹_+¡ß…1¥;â_†Z žaÉ|yiƒ˜zY’÷\(ºæÚ¸ݼj 6nÛŒ?cX˜áŽ6Ó‹Ø¥7|‚Þ%yóÝÔ©Ë)œYaÚo£cÆžÃv­šOa¼œz@'º$xòo²ënÓ ‚>ÕÑݦ8› "›º9º9£H´¦VÆlùæõ›Gsì6¾ºDÝ¥9lM%ËAJa´šË1x-2‡*¸V—Ð*Ï—ši“t:ÝwƒàbIrò¶Â]Sì%»K’­s|~HúÁ12u¬¿.ÛaḺq—*‘}[¨HR~°ž<ŽN„i$]u·RŽz¶iß¿õiúÇ–KFÓ‘³1f>§9_µvt‰½+´@j_P·Òçí"ýÞÊת±b&ëí.‰rgâRMoÓÓÅaÇ#¶4MöÛBR€ ÅteÑïX]+SÚšW¶ EÎ0^†-49g=­(x»ýóÓ©9õ®Yç’™†ðq‡´8“ôV„“Ô"òfH PsúGÖMSß8õë…Šždƒ†ÊOýr÷š–¼ŒùhMvíROlJʾ¡Ü7-³ÚoÂ$±ó£¤€>Ý™yAeîþþžè[n¹ƒÀ¶ ¼ôíÎ&®±ÅMk°žøxÁ|¦”P*»ãJvÅ«m·.«ˆp^ÈÝ5mÄ_Å$ó—ÆDYó¯•êÖ7` ï³Ç(Sm:à™YN´|wñ)é&e³™#n)VÍ™PÂåF·X]@E ©rÙJgÕór’Ü™pù8|]ÔaÎiá·žr¾e¶qØ”vçë0æ¹y,'>BIëâ¤õÄü“êkpÇŸ¼ÃVt–[UV)µ]ü¬û¡í%fI$â10îŒ$$.²F÷SL>·¾ØMžläy–\ Ø©´صm YÉR†]ÜJwzu£d)*Lêݵ™}VƒCŽûоØÉáÐ"m6“¬]“rQÉŸFíϼ»¨ûÐõ‚lùmwÂesÁ¼êr>ôhýg[Ivio±1%’\RÛ#­Tª±aOpɼTe¾;q;@½õoF‚½ðf—Y„8E×H¢²¼+Ú?ד¡ÞMï“¿³®<²Ú/¶äŒ‘eWw•~§•G “y-;ü7šZ{ÜJSßeÔjfo6 ¯Ç’û\O¿U}KÑ£Ó¡Ë1O4o©‰whÛB|I‹"o† Á[¶sã—~&âªêª0¯†1;$™¤”–ý¢›ª@3ÐgÒôÕ¡d¾äµ¨±æÞG ?F¢•ØHz‘fùLÓ-|IÛ ®Ð]?fyH—pŠÿ -ó^½Ë|²¤ÒK 
Qê&t‚Ëœ—ýnÍ.»M#H³PãmÉ´Ã9ûAMì:/uV42ßKJ%x‚‚’Û[É9tE;À¯«×I¿C_³°õV,ûa™•]¨?7%S¬Ê4…ñR”¨óÑ j«~Z^s–ÚMF݇×á[ ’6‹uœe.#dÂ&SŽ`׸çÎcJ<ŸÚ¶$áš—[“Œ ñ’ƒxs „´Á;CÁÃñS/|柗B?æÒßÞ…4ZJÔ”¢ë|ºJ Ó¨ž cÉÁ´ô$yÃ* nyÇ8Ê<«§j³¬X­8ëuK…5<êîMaë$ËÌqÍ:Áð¨‹9 8ÅœwPáÝ–e7|×6ßAH–°%çCs!DlQ¹Þ»£¾Ñw†o7ÔãgþøV8MQ³ãf»þV͵§\"Ș¹} ¦éÞã@ý’°®ÁZ@ PM>zqï‰T!m\^}tí¤8ÊZäÓ›£X¥+‰SS³ó o#$ßÙòz7ÃaÄr‡·ÕfÛäßÄšvÔã²]o›‘Ü (2žQèÏ>|+¯‰MûǸ†&µsù¥MvŒ)”*e!Û^N =Wª™](ªîÀÿ†5¢•)Äûà{#ƒŠVçv]1mYáe/:ߤ$`pÛŽ[¢vÌH(Kè¢o]ãuW ‡di¶¹cήf]+ 7Ê^ÄŽ¬OÕâÞnUô8_›o—)몥tE“o9hZ²2ÜW5¨¹ºù¢~õ9âdÉJ¸Aá-òÓJÓ¯’z‰'v—Â:3#yUv]%µç‘QR|JpÝþ´çþdòËRË)ɾ_7·ª±å>evÆšðT©)ïڥ߭iÙRÕLÀpoâŸ\Y2–kR·æ%ÖóÛˆRûÀ#¾%”‹Jz~a‰&³”Q«wþª¨¤}tŽxÑ;}_«Ùö›¨b`×T¢êV™H¶ï6“Ì·9ä .Reð©%ÞPx"§ÑEøš“l°ÊÍ/K»ýßFÖÊóF‘èø›fjFrèeÚ†ßåc^kÊí‘^ŒÛœôW=Ê5MF8ĵ¤Ã–܃²ú´¸wi ï€ôµJ:£É$Èà¯Êq’Ýðž…y¾Wéë*?F’M¦V̵&T»¬ËürÍx¿Tq•õcIí)+jØrq´\Õ|c×ÂHêQJ•õA„ðÛJc€?jy¯L¡Úö׺ž², "ó®4·ÿ…Bã¿òЕ/º›ÙÍS¢mlËÕÅݤ*N]\HÖ6–2JUÇ Lh®›¾…&Ŷ•IÚ‹QUÕÒöÝ­Ç ÅQ'0•ùĥݮ6¤©•æ’‚Rª ÕŽ æÁ¯M>TˆB&ÄêIÚ“àªÄ´¼Ò9 =©Ï¶º)UðöRÚèo5)†ñ\ŽQ-,õëºÊbk–ï•–ê] 4­¿švøGC‹ºJ©ó®Óˆ¹DÝ—MQ‰Å=}&a›… –†8ïñ'ª’’˜Ö9q¢†‰ &è»ö¨UõoFª]±æšCŽdˆà/PW›³3*iê„ Œ;=ú)ˆ1zñ©lTƒžUæåvlÊÔE[©HÇ|áeÔ.•«f"œç ‚3öøBP1»‘®$ þ]½æÒ›ëUúÇßœCófQÛêÏßrqg5Wë{bbl¤”¨ ó(+«ŠHÆú•R‘z¸eLy¡,)õÜqKB†`®¼0=°«a75·Òx´7ª6äHõÆ‘H…·g ¢SI ¨)ç)­0ë‹=ÇßmH lŒ”®Ü¶g¶iÊ^¾’žŠW«>è~U3¼¶ÂÍ9©–Êåóvâ[º<²ëk×ÊœåÍê§ÅìúŠ+Xje¹ù6ÝAImäñ¿íÄšz£V¾2Ë¢êooÓ¯vüöE­ @U*“\öì¯5#ÊEŒ“.©ö íࣀ);vâvW˜C.JKʺæ¡aÆÿgX5*û5»õ©4ɘ’AÏYȨ"½J¡t&¶›’“ xÝeÅjœ©Âë¼Jáè’Õúwÿ©?æDÞ9o,¶Ã¶fŒÌ%’AŸØ+ÆõvÒ,ËÆœ‘mçun~ÑçÙl¦÷Õ¬i-“#f„e™ÖzæÓø”"ÊnVrc]k%ÔÚš¯-¯²€± X–Dú/¹*…;¹·š=é]!A“>oαé]Wá"÷tZ,¥\T¯VŽ3îãE¥6ÖJ­>Ÿ²©BúÜ8¥ ŸX¾4[Ll‹}ð‰[EîÆ%“Žt/%(ë©íÂ$lå?E"é®D8Øì7Çt3fRR­ø' ŽjW³²˜ýî@Ú *¡³pÛZmÛÌšKŠÕ®ðÞE¥yðïܲ›ÂµÃ× võãaк©&›ÁIßÏ—®eþ.Ð¥yÑz™m©Â‡fÞhBBßmÂÛál´ZRL¢›.ñ”ÒsδP®úÔìêSmŒmNn¡JqúÅ0óI¦XU2„K2¡ Œ04­àí]Hmk¯ñ§?Ê婯&àù;6P׳˜vÕMrfžªCÖ¡OÅ©'ëãHvÙ¢n¸à ë'ª™ÄÆK+,qØìü ÍMÌà„Óß}bÂÑrë*yÕæT«Ç0Àq¡Û´´.ÄÓž“Æ EŽÌ¤ÀV±´p–«}4Ç=ô)ÛbÝu2ÒRè¾êÏ[é‰Ùë9´$ô^ÌLìí56pÅ—x­¥kV#ä%F,ï.ú:¹çTê^J£€WfGj‚i”K"ÒwTÃÒîV™º”â~™N݇³_iöÀÖ&êéC¿vtìà Zs3lÜ-Þ=‡ß¦±b¸e^]Šj•^• ©'!z”Ãzˆ¬N<ãslÞM.Ò¹`NÌ ­"Ñóèu„š8Î+HÈV™œ7nhÓ ]t¤í~,Œvù½Ý|îÞaÝS-ÛçRrÔ“-i‰ö™YµJü{~‡©Pª,‰¤µ9Tª’þ>í/}رgÑiY²Ó)PQ(¸áÄFýlõ¿ÕŸÓC¹vZ§ò³nü!9)aЩ囋âý*§~ĵ»í8ëmz!B•õDðqÖ©iNÍ¿‚“¬­ßo¾$™LÊ/ºäóNîp÷¥%=ðÍœü¹»"»A…y+ðY‰+Öwö‹MIÿ‚éðD1¡îOüdâÓ—ÈVÜñÙ͘1' ^Õóäêþ=:ŒYš-"•ëe•Ö#“«al+Z•£Õ¼z"Γ´e…äL8Íqå^uJ1/nÛR(º… €*rH8åR}g°´•sNÖwÌ(Q*­ÚgŽ$R›éSXbÕaÕyÀ~Ò{R %-(‹«MɀÃèãk/糯-…ÎÔRæÚÖ›‡GtVêo]¢Hçì¯UD9j•ònŒkRªPlÏ`ë…Ú‚õÄ:•ÈðÄÓǶµ’ß-ÖÆy-'À˜™Ò 6ÿ÷ÑÎF³ß"Ò&iYt:÷YïRoZJÊZ*Nï¥ïÑLÚÓ?4†ÍZNÏšL8‰¢i=7¡‹:riÀB]WOă hü“?¶Ì´ÉÝP£ÿ†ã /EåªÖ¾Oñ8Õm»»wlO[ïµú­:Û¬àpB“øÒžÎènÛ˜ÕÞú«ÑïX²­ÖY˜š™]B×@†ÂUEWž†éé‰ûzôô¤Ó)ÕÝ¥ôQÍ\¶ôÓ8òßoSGø+ï]rÑ$ó* ÅF¢õ)Lðø€u2ÒËÖ–Ö¼*¾ží°—Þ”g_,ôÊWó^5üQäÛʬø´å,­ rñ]5K)$oZ7T¨d3‰C^–×@¤:8ªOwV{âÞu«>×›Ek0_mž)‹Mˆ¨º8Üj9˜šh82®HÉXo­iœjµ³RëA¾‰BÕ…0Ç~Íñ¤r‰-Z ¨Rãn¸¬+Åd¸pôP+Î08BÙÿLjï«U¾Šð¥{¢Q ³æ¥.áÔν¨R¨  ¾‰T}"~t0ú]MåRw€Oá÷G’kijYS^.¶^`^;\ 6bÝI®²¸§íˆ‘RUE\*æl”ôqNð:bÍÑ^º¦PóT¤w Ä{í‰=›P:û­ Âæ4*'Æ%´Y†rlsŠŒhya«]n^ ‘°ñE7cHUŽÉø–îo¥|J´–…ÚÕB¢˜Œ±ÌŠB˜+J3ÏŒ1ê&8nÞ"ñ®Ê$öôsÓtK] 1 ã{úà¦`¬–NÀÓœðÌÝôÜtñðß…:8¸ÄÑrõÖ\J¹”š{ÿ8u©´ªémw7‚#²°Üšûë—IÊoWÈ&ÑWP«ÆmËÛhoV‡}pï…X OÆ¥Ån«€ÿÝÕ<¶úé>³ ÙÖShã>”«ýÚ¼i —²Æv‚ä¿þSv3`«…)}pöÔZö[ÌIëGÎ4ñ·ç—]ZÂ7Q7{p‡&{·Ú¨‘ŸNÛ3oUGè¡ö“Qß ‡Œ²yëÝ:ꙢQtóûijÒ³uÌ•(}XE°ú»‰ån¾ ;ã˼Ôà™±ÚZÍÆóŽc[‰sTmË$× á'P²T>‰­ ©¥@Ç®ÖÊ-(lkš^G»Ýq`13jOJHHß/·Š&(Pj»¤Pyöc„y3¶”l™igܾÆ_&£êÇÙPësÖŇ+‚,gçfNC d¶Ú«É!Jâ\I+·iJ€ÛÖ}Äñ•‡ï]"[XÓ+—UBÛøƒÓ]Õ§=@¤¤;5|\—´PªúNK–À窷eу2fB/"§Ž´”e]‹öDݾϟeƒ­Ô3­•u¦Üu"oÑqÆÐ¤1ôŸShùÑd­.0›ç—ÈUôj1µÛ²4–Ëmɤ¶ò šR©y8›ÜŽ2 “ÊÆ°ÚÐêãj C‰JС’’¡y$skþµJMõ'òÆ<¦ÛN;)1g¡z½vj¬YöMF¡‚PŸï8ÐlÄJK§XãI¯Ë(¡=€‘Ù XòÓåÒ½j•êÔšš‚~;£Gô{„¶C ]@8¼JkŸMêum‰ ZV %#Vž‰!r—vó{iŸH´.Ó B[iÓÆ!£4î…8”%JÕà14Çm2Øy òB”Jé³iƇ lIÉ4â.‡9ò5æÆžЩBžRN[}û!s>„"U¡J¯¹_öèZWT ŒNî¸Ro¹¬HâãŽ]Ê¡„ºëD%ë¤îÏ¢°Z%Ë8Sa­:jbeµ4ªÒòq7‰è QþpëKW'…Ó»1œM¶ý/%µ¾‘3.û|²GX>û ­]ð¶Õüº ãET >aìZiÕsÝPñ¤Jè¼ôÇí+D¿Óq¿Rá›GlÁúìúgOÍmÊvÝõDÖÙLK™K:Ï ›Ê¡>Øbakvê“Rw 
Ý”¬KK¾Y¼ãKeRT;«ál]Ò@®4ËÝK¼fÁR/Žr=f<·ZJwJß‘)IDœšJÓ\«SŽ49üŠšR‘,µ8.jª£³Œq Ëm0¡ÈˆÑû*z~Ð6{,™”ºqXZ.³Î WY¹xÆ‚ù3U˜â§æ&BT” ¥%|BÄÞ#Þ•Ê4mƒ)¤¶”«ërr(K“*Z‚P†ñ5ÀñÉ â7yfµ ‹&eûvÑŸ¶ §C2i$‹ÒmrA¡ÓCEPáÆH‹ „%“¬ ôóáÏãX´P%Þ¼Suà*6aŒiÜúe¥fŸ¼”–‘4Tj8·’¢ÏUa…Oh„·h>.M̆ÑwÂu™Ept$:ÎÉ4µÿûç”NÿÃqQ`Ú³r(TÌͨÜúffx+)MGÁ3‚’@.ý6CãʄΰÞÉ?6 Ëù®úãÉݼչ£²Ä8•½$» šU“Ñwˆ?Ýÿª¨ý6Ô×—ÅW}ù«A<äý±«Q¼Öñˆ‰VÖ! +Ÿô^Àsr£Hí„§™f]Ý[(̦£ša@wvF‰È;?hqR›ÂªÃ3º´ æÎ$e‘ ‹ ]A8á†úáïÕÝ@ŠG1ù«œMÎ%‘‚€cLp÷ë0¹ª³{\)µJPÇŸó𣠯ÎM0Œ~[ȽP»rI³^›ø©Ù²•½Ý˼/4èRwУñ{"^Õ–OÅ>Ù5‡»bÐJÅTi{>£QòpdsÚÑvõàAÃztˆu®)ZŽîÊnì;âåUuçùWº&ƒ—5”ãnçŽl!•¥Ãx©yl®<ô¥:!Rx\½}Gn;ÍE:¿‘‹9F;h2ç"0›¼,îPÏßùCîˬÝ\œ­ï÷dþG|<ÅZ.Ë­H¼Œ÷Võ;âcPÔ©Z俊ªWfÙÍX”ÃH£hn¿Bž8DÚæ¦pƒ¹ü'Æ”whRöà´Ÿ\Ihìüû•j]Õ¤þ2˜²4ZVËûI Er7°:›Jý@D¤ô¢&®Õ¥JìQAí»v½ÑhÙrr–Ê@_êKÄ)RÆdrR•,b)ˆ‰é~ªjµÝ«Rq¼¥3ùÛ£Ê mÛšum¹/'2ú=«B’Ž*¤öœŽ#•Í”h·’{Eä•θãTÄñ*N)!tœ³ê¬hž„X¶Q»=m»÷Aà q"ž¼©Xy«6À“3O­kRPRIÅc”¡Ät6œ™9¤só3³eÑ-6÷ } ì–;n¼¦Ís¤Y¶z›nêZ”ÝP9&‰Å$Š 0Λ³‰î#Ž­›6ƃž-ä¥eÒ¢ q<ÃföG•+I+mV\¼Ò?>h̾±)¼35ue-#1BâÓ\w´%-¦×NÏ%æý&Ö•wFŽ?gJËð…¾–þõ:{¨|Ê.{õ$®MŠð›¥W‡ÝDÔõÒï<&Ó•ÖJ-2oÎL£”>ºÒ”}ãFô±»ÛnQå–¤mê}uF&­ºExºµÒªPAXÛþª‡ôo¿ßò‡o©„êr®ú¾<žøD¬¼«œ%÷¯ä(ú£H´½ÕÍ«Gå(g2•S͘'«º,¹G'œJТ­f ùØóå”heŽÍ'D2禡Nó…9áZ”µ}ņ¶ùÀQMÄë)·|i• ÎR¥Ù«ï£”‡œ >2èAeÚ¹cÞP-{B®¹e)þ•ô+ª—ïa·ÍOå_HY´øCl7ô)ÏzEÞ½¹Å‡åVÄýŸI4zaõ^u3(Ëê,ã\sê{Éþ”Ëþ­$ãg rƒ€Ÿ™zÿjqÇ áNl¹™P䥣j˧`uÑÿIÎ7j h…¿$ñrF~Fn_ä­l:Ù?U`ÏXáýœí'ä^i´œJHw¹¥8{DHÛÍ9Dº(Ó/´z*âR"^Ù¨—˜^=8Ò¨T5îeèg@©Ê˜ €§ÒßÍÝ–ÞI 7VÞèm”0†šêÒü ×e{«·It½Pš$æIó÷ÆÉN@vŠtçÝ Ãu …e\éZw!v}[F©Y€h¾ÚÛ­|gÌèšgÐ È +uöVSLÂqç 9 Ы6Uä• ³J÷D­“,ãÁK OD*˳$9\ íª’:øÊðÅ Êî´ÊUM(”tŽ04ê¯l;nÚ¯ ²²ƒ\ÅÀ{^ªC’ m%HQ¡ÛŽÇn1gÚÊ’7k…(žRñ þ¾±¤zO*‹'…™yËúº]Vyr‚Mz¡´ŸSN0ü¬¡p'„>¶Š•ŽÚ*òŽóL7GÁVÍ–Ö±ûnY„xÏË0Òv€jnÓff½Q7?>…‰iyÇçÔ+ç[N¥h:Å%öôC6lìÑJ-Y—-EW‘zˆ…Ï®½dÙOJKÓXŠïº{€… $ÐS§ Žoåkm–èO‰©ÙÍÍs¤MÈY³Õ¥¡t¹Rkæ÷ñj+Ì k²4žÚrÛµž›(q:ŸØèà說Ÿ­Mµ8ùN©Ç‹ó×ùÄ’%^n¯Ì–Wü ‡Þ„)=ñf&Qǯ;1Dï)sü5‰VfÍDܸ u.ÿìù…¶Q­ûiIoþ.®,9™§^™(^©é_‹Â~ñ7{ãÉnš3¥*e^y&Ô³©äT^r\`ÃÃÒºš4²+ŠB”j³þ¨áœ[s©””[—Y8S«3Ô ZÓÂÒ·*óÊT·¤”¸çÝBT¿»gl·g°R—’6{+ÝN™¶^µÜ˜RÜ™#RUx_œèSOCfð™¹}H*•—ÅÅ‘q)®Þ56ÐPW8µ¼¡ÏK-Ù”½(×.ra<@o|šÝ¨;8´¦ÁÖ’i%ªLÍ ·Ü­l):Í>¨˜—°xÚÙ‡Ôë›+ùûbFk‚7«$Wh‹vÅ”·ÅäÊ¡/lX ½‡>ã•2lX–´šœI‘}M²M÷›N°]•Š®„cÉÀgH³m›FRo„Ù3ïI>—.*b« &¹jÀ*7Hñ¡~]^³'fihšw-!çSžJi­cßÍç²4oLl‹nDLÈNJÎ1æ×‡?бÍSd!"i5AeäœêAæ¢ÂM)¿lNèíi‹Ó6|ºW˜-ñOMSë®XDΆ¹,»ÖTæ¨ Š 5÷ꉖmû1:ÅÉ)ÙQ…ík*4ßq.ßîê‰kq§šs…W¨8;îĵ¨Sƒ%ݸ)?÷Ö³nâBM{GhÝ»ò‹‰tRòS·gˆ‡%w(â>û2‡«]á‘Ïi;Man)|”Ò.øõÄ̹q`©xr³ÈïŒMðr¤:• h¨¥ yXûáí%å_i@œ[ +)z™ÓŸLªA£}†£°˜šmN¨)²H¥+ˆ®¥(«ž'ò· 5y¼V’¤ƒCè›Ô8sÔî0äâĹ2í¾+ÅVg¬a÷¡»vqÖnª[º£ÛNøaËA9J^¦AJNýœ`3Ï(´lyÛmLk[Ô·/0—uu8\ÅãqtÎeërl2ªb„¤7yJîëÙº-4ëDÎL¹k8N M®ì¸­+ÄÇòÂ&lf¥Ž¥ %¦iK¼Bi–i'¯~س¥\áe1Ìaš­[B§ 7eSÏUMfëO’F;¹4õm¬N¼–Ô©à>y=Õ,ºvÄú×`Êß¼•”L­*UÈÊ‹ÉxŒU²y\"R]ø³{´xÒ%/•ê𿸨òiß m§yêú½Í¸ÛÒnÉÎ4™_ìï$]*í­Hg]u*C—Wí)BÒ~ÁI!ïø%Èòe¤Ó1o-þ[Ô|f¸-±wå%IPN±*( ÔE‹lJ[–{„šÒ¶ÝO%WµnRk»j ©$¥$”õ¸†“yjºøŸ ˜ò¡¤ÄÊÎÉJºDÏðÒ—?ÛŸz4mÅN†¸2ýq=‘¦’ÌMÚöl’YZ‹Ÿ´&õ)ÚxßV±iJ·##fY쉇jÊs55"©NÒ{ãE¬«bJYä áY€8Æ„®Ö•õˆ¶¦Å“iNÙ„±y$Öâ‚Ø^;/e³¨LËSFó†Ï7åX%C“Ì!*Í8÷zÄYóz™Y©g%Y›[§Š¥¡8­–Üéô›–v[L¢âS0W,•¶õYHOB®Â& ‹m÷/½ñfêì«4Ç5"ÀÓKWC'8EІÙ]é_³REwL|¸Hi*„œÛbVk"ÚÉMNð¥Ýo¦G„YÖµž0pLâJ¸îᎨӯ•´ÂæÛ—PvøCóym¦ Zw’Tž)õ÷xDþ„ÈÏä“/ž) øDöÚv ŒÄºß~V´+þ_G$ô ›Ž2âÐ^þ\JZºóV¦BöÐ:±¦ÈE£6iWS\kÆ£#µï9L6ÐïæÛ%.šQ);¯ Ûú}öÃí4¬*N ãáŸTJ6Y” ÌÈ÷cùtÄ´Œ»•¬²ØÇ-bí>û¡æÓ·Ftµ§?6|ýðÇàØîð;〶ºk’8õ½Lnã·3MÙÇÀ^ºAªiAˆúØwÂì&ÑŠ[ïIÆ„BEzvm†,°0. 
ü½ëòréâ–Åêçͺ™FB®áz¼š”Ü)±tsTì÷Ù³ŽW/9^Ié­®ž0ÚÃs!ÌnÖ—®›½)L5.›ÉURIÌš¹>[âeWq*wÔSÆ<§ù@L˜à6PÖL&¡EEo/c°î"'åÜSªuå©×e¨ÔžØ™bë׈¢}÷BAg(@¢ùp´îê»à-ÆùC°…~İ*F°%W7Gq÷Ei1f:™—dˉOì©+AàOŒuŸVôpö§mÜœsZ—?h-%mÝê)IWÕ<“ió2“r–\²nH:›®^PØn¨Þ¼Ÿ’ªaˆÉF¬ºÛí!æ•}·… ©8Œñ·üþxÒK@JÊ8áUÔ5ñ‡:†'êƒÞ—©V¬â^Ir¹•Ÿûa«vUÃÂåÔÛ_Ü#.áH´¦3¦Ò3A#SÎBF[‰ î$¸Ôí‰j2o©º¼R+{:œ*:vF‚ëÛ³r`jÒþ'“ÑòI=ƒ~ãCe­‹nw^SäÐ$㻌x»=(P(UÔg»ùÀ]ï• g †Ê)Qz$R/^^šHÈl4òudÏ&fÕzh:㧈¤¤¶£Ó\z»b×°­+araw’Þ2ï Tš ù45&¤ma t]¼é íî=ÐóLÏù‰×eÿ¼l8Ùð #šy¦³vçÀÉ[Ö³ Oí3ƒ^”Ómô^ON0‰+NfϪß&`ø ô§]z¢Ãff^N[…<u®-)Ûê&.Ã’ê©óˆy¿á”q~ý"zÀ‘™>a†Òå9$^³Aßh›’Õ2ËuŠçJ‘ÑÅ-jJcÝ*“ü¢^uH¯ l·¸]'nUMêa^è—´%Ö¾-wbëÊõÈÅI(=> ̸|»![¢µo¬jµ^<¬Æûñ§ljBy8íÜ{bê=ëì…±]žý°¶oÓ·¡´Šž'FCnßÈÂo¥Î2)С‡e{±‹Š/_ÙÒ+ÎwÃTbÞ¾}‡¢'å[u7‘ujÀmuá Ýia»Æ¹PœzNçŒZ5)/E8–ON8ŽjÆ–éŒã¥Vmœái½¯ÕU8ûþq7.âßyÇÖ·\QªB‚‰æÄœæ'¤oT„ÊDôŸÌ=†`#:²v`µÐ’|‹9§/ܸ§•èÒ¾1!£œÂî¼ïcÒX î­îèšÐôjCaaJÌÐÔe³fpÍœZ}Å-C|¼°ê§ª±bË&Y2³Œ!m†¹jK©ÃªõãÔ#É~–|)g"Bihá q¾ mÂpEkuAdáp¨^éÿ>iýXºžVê£Ê¶š¹$¦¥*ó¿´$!ÃwéQ턽"˜—rNÐrg+­Uë  ë"-[1ÍX[³Kà¼$J^XY#øeÓçÓWóâ}´=kÙokuZñDR¼j¨ç»×†XÖ±¤’%:(‰¶*ô̈ PQÕ@ª—ºIÇ(Ñ{V²Ò²OQ·Úi§ u­Ö×É]ERB¨põNB-ý•ÒìÃ$&|â•GJ¨œyÕH·liû*eI™aM\8š¥@}fÊ„!õ'E:G¶20>>ÈDÕ9+ñöEk´—XÁ½¹ãöBt[s¬Î4°+þê¤Õ=qmØ­ZÍ8\Í+-ý»Fð"ÜÑI‰r]Ô9ô‚ qÌSg7>Xa ÞGm,q3–Ÿ•ÃŽ¦î¥)Å]e9ìÐ C¤%äì¹F˜ÇÏN¸ÞgÄÂ7Pv@m´5u±Žá³·e´¶ªd~“ú§([¡/_¯nÓßß禑Æã\P¤Št×Ç®i6ë•S¦U8ÐøAà.WŠŒHù&ž=ù‘eɥѫZáˆáãÈjÐuYî N$Ÿ\p)€Å¥cÊã Ðg™Ý²•4šBv z2¯Œ4ºT.¼Æ„øTcJ;<# ݼ+Ò?”_O¤ ¼ÈÍYó+Ù ›–lݽE}† ©,µÕ V¤åìÎiµKÚŒ1̧ÁT‡­9ÂõÖØN¼½'óÜ"j}é6À˜F¯ ëEiȾ2#o>ØV­¹Š%‹ûr§eêcÙëÓ¶š†¹ÂËdžKTÊ©'gçèü¼Çœ¾æ°b*ÊÓZiÞbkE&µÅZð¾™DöˆLÜÖ%ÅÝÏâ\tÖ&t6eóÆYorCN/²âL?äÞÖy눾S…N­CñCLæJ‹j˜RW˜¾‹˜}l>ô7¢ŒØbðq+˜¨¦þ›ÜžÅaÉ™[G…N%ݵºP{öD–ìÏk4ÀZWÅÅ[k€0í‰bOµ4ô¬ÛJ˜sÍ +ÖâP‘ÖD.RbÏK;y¶Ûåâwþ]êõV4CH‘aÚo%·5S$ê²›¤di‚‰®àbĶ´äåÜ+ qÑD© Ñߗnÿ6 ŠÆ–iv=–©å¨%(å(…>¨g¨Ò{rjØ™Ÿ™˜qÒôÿîõìþ¯ÐoÓì“&–½aLˉt-²Ï÷nLw²—i®“?¤Î‰VåRšKÈ@á>TR:ëNxU¯0¦YuÒC’¿=W“Q€Ç0ê¢Ó¢Ô°˜aÔ!ÔÍK†–0VÞì@íßfnÆÒÉT[•—jY`‚ëuINܦõÝNãZÓG&Ñ4Ž—<õ>.¦˜bN)•è®Í!²$-97d^i°ÛÆ¢a)ã`HôJÓ™õì"Ñ »Òæ©åJVÕ0ÅëGÙÃl_)UÕg¸q¼+6¤¦úS¾£Âµî†ÝT16¤äotÇÞF"¸l#í &ˆ£¨mÒv)ób#E4†ÌaÍ¢Ùm;ÛBˆ=! 
U;1‰v@BBAeb©X#§*Ôa¼@Šˆ)PHYéÈÐÖ¼ºÞSq*¸xÔ öS²°íOW˜Aì쉠—M ÝOrÈSBðÄtåžXû|a¹5ª€3ùTËoûãA â5j]Ñü@AîIõBÛ˜k÷Ê5Ù]Ùf}úâUsi$:…„cÆðËš¾øÂ_Z¾1$3ã(h¾qSƒ©5î4Š¿³²)öì݇ƒ·ŠÖ¶ŠlÇ:š'•§DY¶’WÂþ@{YÈ®5ì­:éÔÃ:éY™u%®XXîŠõVšüIÇßL4”Ê?ðDÓ£‹N W^8®ü†,§Ø´äš·l¶®]3rÜiGQÆQ;Ù¸…R¤ à#D-"`Ê,åÍmÜiséRé?0£°oe֞㬌ë•F8Œ)ÌpÚ"}2v£e§%¶Ê@¼BM Ñ8Þ 6E©äzVa:û9fUïá’ŸµÉ©ç"ÐkNRaîË*ë˜ —ºzõ”ŒéœLY ijl5ÆF lç­!«4ŒÑNÈ“W {¡»5Ãïݱ'!r—“N±ÍŽ(Ñ{yÄÖBu^i jœ5=DH¦óAÎ`(¼ ô„[¦Úšy–lÙäJI¤ î‘Sž@P¸0ÚÛ”JØi“J[åKRªºœ¶–=U„¶Ú‘@jO6}´„Ë•dšÀl!U>²‡¬Á•—ýÒÒz*?—„P³²¹×#Ÿm)r’ÛßCáëS†—VáY‡m£GF9ƒáXŒ,ù¾6ú%G²!§sMaó Ÿ:ð®ïåXYA ^ '`ÇÃ(˜—´ù+™ÔPáEW?¢O|"d±MaÖ•/§¨uîÙÏ dŸŠHÙµ#ÄÖͪ TV”¶•b»è4ÙÉöü@‚úe‹ü2aµå©£*ãoä%T;8Ùç«…M“*Þ±{…+ÖU@"EùÉkδJœ#‹…êsW›,zaÅ5;ƒ'TEjn0Èb) ´—Áf˜SuIëÙ^¨~Ò¾½UýÛ;iAÛ,Y\¾5Ô¥â¯'°õC"&™3*Ô4šŠŠ¨ŸªÝãï…N?1Áê£4ôk·qÛÕË®Ì̦U¤_ øÓf€õV&„”«´!)Nà’|qï„$XÒCtuç¨TºâÇ2:)C\7c “ønYn¼ÈCmržâƒ2O÷Ŧܨp¶ÂÒáT纠eÓ˜Z}\TZ‘NìÆggªy§¤EÞ+܇¥wgˆúÀcXÐù÷,ÉÖgZ˜)T«÷ßO\Nü¸ßRñæµÑkÙÌ¿xßø¥$?Õ¥äý`?HÆ4ÓI²d”óŽ!¶ÑÊUoSê¢òP1¤ÚI7jZ³S³EWä&…W¾Í@ë¤K:feË;ï'ÀÝÑiY²Èf섪™;”´+þêB­&ì›!µ¢‹yÎBq©íË®‘;7?;/Â&¥µçeÅ!>*|!™‰Æf¥&Ô—eœkÝBÕAÿ *¢»«Mô¹›I‰v|K¯Rú'/&‡—‡@‹fÎ[3œ6È|/Ï™¤¥5BC¦è2ÕZQTašyëžáÕV°œ* êÞÄz=œ]¼Ñf$3FÅPù5®uô‰RØ¥Vž6X¦4®ÍDÃ/2àKÚË©HÄïVd9¹ÄZ3.âû-Vø£œRš17‚së„hº²›‰ ΊRÞH£ˆhâ€; :iX”±ŒB+\ªüDV*žRÀí;i YRR뮬ßÝÿõ'g<uX¸h!3’ÎIUiLíX¿È ó׿8àI.ëꈻƕð[$ѕשCª¤n^aœ¦+ÔGtO2óÿ½-¦»JÓzŒ!‰Wå¸z+²ð›Õ¦Ã…z©”*Y Å16¥K ;ˆG¤1îÅÐó„ºñ¦gzd{1=q(E8"€ÈRŠÇœÔ{î†ÌÊ9JH÷æ…„©ÊŠôÒ5 ´ªÞ½ïÛšÕ¾ TôšCJΪfd©+*lƒêÃìÄôÌÄ¢j/u{=Ñ-jÏ"QμMUð“œLL©÷5y¹•ÚÞÇ óm‡8o¡:ࡉ8gÌh@æÎ“6žljš¯Š;ŠÆCßlL”0¨N­`T0*ÄœÅ~IÕN8Ê8—8‰Gðà:ÌJ¶Zh-à5ÇT’wSi×JĘÔñqÇJ[f¥GJl!5'ª±+l:Øœ— k«¨JEúöVŸ[tL½8ãÚŧÌV„‘w»M\Lð&” Õ=;¦¼Â¥!]Ñn¥ñ3D¤ åáNžÄþ½Æ¤æOks¦;“RvÅœÇêI‘¹qyÅPm¡¡# Žß²-¶«Qm:x>¬ñªª]ï‹] Ì–ˆK;œ×pV|Ñ,ûS²s-ê×/ñi¢¼iNø3oÞm(óeÏÚHðÑ_T˜ò/¤ªh•™vø“ÁbŠUÞÀkõkRV›é5IÛüñý.jZ*ÈG–+q‚á’eµ©)øñR;y_V±:¦˜sZ­G¤G¨ãÝë]Ô¡ÒÛ|×¢ÌuS*ºý·T Æ’È»(¦ÔKMž:FCßš±#*Ï%+E3ªOvtê†åzžeòvìŽÚo‹6ÎL¼Ã—J@·jŽÜ‰ææ¤"aåÙfQYx>’šƒÝ깨°­†TCsã –HÇ•‰§G>{asrvtÇê3Fu$Ð[ÉP¦© §IÀÂ-k!‰ w§Ü—–½çæYi\üU¨+ȵ´îÆœËÙ­ð÷˜ —Ô%ŶRzÐ^ÊÅs…ÚVÕªÇ-Èóyµ˜Ô;âZÊR”8êŸW9»†Í¹SùCMJJüa ¦$ú’kØyâ]‰wOˆæ I6“˜^Xlδ #X$œ2$ãÕee²fAÑêLKÏË©ÍC\g=Q¶”Ù \–ÅÅõnì¯PŽ+zæµ5ç¨ï øPmˆRwcJxwÁ´”rMzŒ9,ìÆ%JNÑA©@']®ÃÍÛ·ª˜@oŽ;)^ü Í‘’±ÛŸçó~ùÂJ[“Öêáëï©´çNÜn¸lêÙÛAìʶ֩*K®V¸Ö½ªu÷ÄÓ®­%–š«»y"ƒé‡N1&˲ð§•}Ì@B•\pÇ ÿÎ,×Y´¦Ä³¾on«ôBºq‰«Ó(2Rˆ-äpó`ôÞ)‹EoËLéó›«¿ejréë‰)†›KJy!o'” O~Q;1wâÉ"¹ O½y¢B}éyAÓÆ½^úŀ̜ÉU§*JN eI¼Ü 9f+ÙDãn´YÔ–ŠØ¥j'`Ø=Qi¾PƒŽí¾ìËNÉ y€à¯3^œ·m…´ä¬à¡¼+˜Ç®,}1mj³ ¥°å]¢°Ç:pßz›ê)g ¤LMMºxA]jJ’ŠÐSå÷Ã)[ÞRÿVôÿôòþìK<á´>1(ËîL´b3ã#üXÇ’›Qû:ÔjX’ âîLaEn©Á]"¢,‡K²˜þ#›àÒDÖ€gŸ«ÕmmpÛRk[ûÎEAãwaךK7\B«¸4â¿ L8RÔͤƒÌ ¯ÂLJ»ªzú]7wÑ^Ê÷CdÎKëJuˆô¿#tRײ\Z+}–¹gZػԧ?T“Ó ÄR¯#ߢ½ñei^µÊ¸«›®Ö»öbÉÓ)-MÕK^VêŠþ.è—˜±¦›/.}2ÔÈñ’{¢z¢Ôòˆ‰T©»-(u"¾vмqÜEkчHK1½*m*=¤cÙ ’-y’V2¤áÒ‚G|L©æQ6R+¹[·P²§7DÑUìky+=$Ó®ÃM¾›Í½ÅR¢˜ãLLÚkd*ˆî&¸sxÄžð‰‹Ž²Cålìü¡l¹;ñ. 
·¼P²h®è–’–V´·z6cÍLaù”8«­’£†Ã»”Ïž –§ ™|²Šì$äU QËòŽpVº â4ÙJc (ºT)†8V%Ÿ™xU]ÞEÚu*‡ˆšqä§-´ßL«äBi^¸4kqˬg ˜ 7[Ã>ܶĚî*ŽNÁZíìï‡åõ©¼Ú¯ á^Ø,ºÉó߈ÊV&í@Ž*wåùV ­á~ù»†yõ{iÖ£ÁíE´åMûQ÷ñ‹Y§Ðñr¤Mv :öB¦ j.¨ÓfDáPwBg\œ^¨'ŽN ÕÝXaåYÈ/¢¥ÝäcN‘¿§š,[av”Ï-Ü_¦(0¦øÒF$Úq9D•âƒv·±Ò´ë§ŒM9qd§,zyºâ^qL‚µ (äaŒ;h¢`\NéõÄ…¦,k),0âï8Ž1»vðºTG¾–ìs§Â²úî:C/ÓÝYÛ…v™f}QmX¼ ʯÒ6E¨†ÎK»ïê†Þg‚–”°_9Ô*½´§|fS4ÒJNb wšøÒɇg/J¹æÞi)¾€kKÂ÷)$ çLr‰Õe\Q⥯Œ9ÝìÅ_V°[©§“ñNò¿êò‡Zc…-¹›1ÄÖ㟴/Vg¨ÑÆIœá(³¯¿QNNÿË8Ñ9’ü¥+_ѧó‹fB`–q¤Ž¢zØÕjÜÜñï|Xr¶w»0ÃjcÒRkÝKÝÑåEìF¥¾±ÂeÜô5‰Wp1)G•uµ$«qRQøÊDYö“Ò«\ Š“ë"-›—³gŸQ%EJð%ø¬v\¥ÊÒ{¯^î‰9‰Þ¹Ê¡¾sØY÷cGåíi€&æïK¶±‚5ªþ58!J) ®$t3N;(Chq78¦‹NúïÇÙA „ìݤ—[O˜·\ZhzR¥ =Q£Á·™Ô–Í݉ » “ZTºYÇ䪔ì'¶°»f]‘ñ:Ç÷Ëž–Lð¤^O ¢ø®ÇÁO9u×5l äA ½ö¯TM>eÕ0¾5r#oN)>Í‘+>&uÜ1eÁòh“ˆ$ss퉂ÍóÁŽ™lßÅc¥R¤¸ã´­κƒŒ6Þ@Xròs¨çì=Ðó H#X‚;Í}cÛ¢ÔåÄ¥55ëæ„È5M}úˆÙž-)Ý}©=‰ìW¬E©-.å™.âgãI=Ê÷zà5V®Šj·ÔxV½ÐòÂ"• nÏÃЦµ¹ˆÐ)Òóê»Æ¹Ž², |ØkÐpë†&BqÉ¥© ¯»¤âsr™uáŽêl¨8Å›j/…¥Š›¤Ò™Ša–ãü뢂eªG§ß(›]hžŒ2,é»ÉRž4N@׫°g‡ŒL>•&êUC\¢B×\´Ö²õÔe\UÝLz¡·iIU©€vå”»òÓ$,žÜücár)í¨‰–”츛l_®Ú·#‘\•]Ýa ;N#¢,±3fªq»ªZ’FĸÓViÍb\Im_) Bÿ 'Õ˜Ó¸³N³·£t?h2®I»³8™Y Ö~ïÒÏ»>èjÛ¸Ô«Nšj3NÀ{¢Ì[*h‹EÚɨqUunŒÆÄ%js Ñh8™E’ÂÎxqi‡AÄvD£“HKŠE.à7“[ßDª÷u"ұ߳îð”ob›Ä{LkÜ×^fªòñ¤YÅíøYÖR•¼ ‘@\;Ûð4÷hΦд&œ˜Q«œŠ¡F½€Ó®‘¡pÏ5C¨¹¥•+ëÊ4™Ä¦{Ä®tðV4T8ÔÔõÔp\ê)Ø îè}u\Ó¡Xç#.7^κE¿ªvijR…)ï‹»·³“õ©´¤K!M¬¨9È)ª«ökMøÁdªvU•Џ×,#Îýt^AêQ“HF°Š#yî8÷FŠ1§µw¯¿‡£¿ßÑå…•µ'4å<Û%`áôT7ž0®[asÆÎ–áWoñ®Tú{©Ÿ])ÛBÒV½ÙÍGÌÓ¹*„hí­3+zZoX9®ŠÔ$¼"rfi“ü%‚ŠgZÜLNN²ìœ¼³+(?öã5äêÒµeµ: +

ü˜RÑ™R*0è 1٠̇\-*—èp;z )ï°Bß«7[4;‡½!—V]@qÊ_äŒMqÛO\O$J¬¡\EŒÒ6uåß3k®V}©rá[.`0V<ùa×HÒÄË0*µ]{bG6Ú€¤÷ÃÓ@üY¯NŠ‘fÛ¦Ãø'`8Ž~~ðôæ½Äà(á!;z¹©ß=h·cØSO¿Z-\D(’¿,†:gXv~FÝwQ"šcGTiÕ~‚-»%ÉJ:ò m“;qÜ #®&×ÝÍ=#Û hÊ\°jqŠ5áv¸z·DôΩWocïº4mìÚ2-´QL)@ã˜ét#[)´\e³æÛåf.Ó¤ õF‹¶Ûö½žÑv­©ûî6oRïêÆ<¦Î!k—i@_#’šµ¡È wÇÔ Êjƒ˜^=‰©†çŒ®jÇ@¯-—jœ¾U)>Ò}H:ÝÁ WzRG|hõ¨™K:znèMG+~Ð)žb-ìѾ¶‰W5øI‹-+bBm|ms²å´&†¥[°Ë *®.0õœêEKJ£ÙŒ[¶Ór!–g)ÔË|RA½×…@í†ä­ÛE™V%bÌ—o”ëˆ #ê ©]€Æh»y©RgéßBâõD™m¹k 4½ÃÛ—|hC„Zú³ËÝùåú<¯ÈªgGͤ¨¶ö©{džže¡ WxÅ¿&°ßKnS„_ä+-ùwg ´/´Û|—iÍ\yé—>ذ˜zQwuzÉJë ¨÷k{î˜Ó«6UVŒ´ÒR–Äׯ§*RƒøtóÄËiT쮡«ì5Ë^ ?J„õN4ÁÆØ•³f™¹j6(©¢ müÞ-: †ýðÈ•›l<°Ž4ºuR“M˜áPAí‡d‹j2KEÒšÕXeÛÌa¹E"]Ðë‰"_“ÖwÐuQ¡C†TÂ<œÛ„̦Ι 7³Æ¯wE2 ³œ—Ck÷#Œ•˧·dŸd…$î®Íþæš×WY@AÇn}‡\¿œ`êùÒhz01aZíMÙòºù‘­ê…uñŠq®ÁZóE£c?6‡_e%À¾J’¡Õ$áK®Yåô 8ª½]Q/?Áß¼µ_NÌýùBP‡×Ñ1h½+l3~©mãuIpkZr+O|Di›œU8Ö¹ú‡oD)ðªÝTIÎ7+cÊ8’y%Ai8PaLéÝ\¡çš˜®¥Wº+ë¶ÌÄ‘P ¥"Q&›À=ûaûFZBh«V eÜè¦YûˆšµXSué<øFŽýLµ9åðñ¤JL[M#R—Ýôx÷¸PŸ½ö•£*«¬K"TîRTçz/ø‘Ò UÞB¥KKOâH&ª3«˜&ªo—Liíê¯èÒ«?áMµdþRå[tޱ‘­EÐ1*%7@牻9çäuëSwú`³K^Y¦ÐK-òܸ „ã±DžªóÅ“:+ÄMö÷Ý ì9ôÒ4¦Ãø^A3 «e¾EÒ<íÑbè|„Í%*øØœNáÌ3¦ñdh=šÃeÊ¥·Gñ¥ì9ù¡ËoGì FøDë>lUiI[ËN4¡CiY®ÞˆÒk=£<‰¹—%çñ—q+Oê’BÓM¡Ä Ã!)e™© ´¦®<º\ÝŸ5:H…2¹GP½NUH¨ïÆ,ŠJÛ :‡.!?(TS©"§²4nß—¶¤¦Y´Hà©«nš©G*{áÉç;â~~úÖ‚ƒE £ßšqкœyáÍ žU뮚'+Û Ÿ[-]CÔ58U[z2í µí7&¸8W —®Ò8¿j‡°ÒiÄ5i̲Š]IÄr@æÿ(—B§W«Hãîñ˾'&ß–p²²MÜ÷øû÷FƒÚ2ÏÚ’ÚÚ*åoT§ÍŽ;«empùê!Ï3¿S Ð÷c Ú–<@±|isÒ4›ÈϘŸâäÿVC˜nÐFhZ:™“æîïâÓ<+ÙœiJ[nÑšj\ù´Ÿ£ø‚{„hÌ÷¶dÕŠÛsãÒRª õP÷Æž.]3¥–PšPQ&'s 4ù𠍍š`=ücLœà¶]&Ÿ4¥ð„$Þ 7qÅ»Á\oD‘^h,>û×®8FûªöE•g®GC–”´±39Ëâ›ÊÝE-©çJÓ#0¤ï Oeâ#Btn~Çn}smê]}›§XÒï(s´µ¤t“~N¦§Ÿuç§R›ùf{iXÑÝbÅš3iš8‘Fø¤z¼i݆ˆY¤ß ™BЉ®{a°%ÔJ%ÃΡâh;áû&ÌYk]*•!9×B,û9 «r Ó:Ýo׌<û(! J7pŸHM†1M9¨Ÿ~ø]¢¼kXŽ`R=óÙN»üUzUÃvTÊ £8ò®ºêu4Í(#§+M!ãtñNìi‰ÅN?°ÄÄÙVUêÀR¾êÖ&'Ò¥]Qæ©&€ûaÖT¿’"0‰û rÕo„LYieA.*‰9PÔÎqž¸Sl8Ô²%ªÛhyïDÓñøÖ¡m딲ï6?†°ÛózëÏ8„§}ü50˺ÿ‹l#£òCf&^Ìš¼\òAêU:Àý:Ed"Ë·-{5Õj¥ØŸ˜ÔŠR]Õ—¥°H4ý]ÆÍ)†QmØSVwë-­*‘ÿg ªþЬX7VŠ%ºžŠxÁ²|×qIÝTú!»-©w/¥µz»"Ôqå*ô»ªC›[½A²›†úã h¼”àx¸—Ý~lk×¾,óAÏѨ‰àÒKýaS\kØmÆà4»ÖѶ‹¶U¡}) n”)Þx{cHî Fa¶HXÞÕ›ÔÇx¨‰^Âuà;Å|cÉí«$‰•5:SnrïU_„*4šU¿„&Ü‘V±…¨”¬0Ã(Sª<¶ê—RRk´Þ!²]ÅxšR¸E ÄÌã ýd‘Ú=´ ÒǬ™Î0@Q4Uv ðêé&R§g&§Š‚K§ˆ”ƒv‚4%çYµ€}j8Ô9Šoçêæ‹u•*mÐÚI¼cDZr^Ò¿ˆM‹ZJjbcÌ!Çsä$˜Ðy'e'—0ëN™u!µ\&ª¦T¡"-k&К.7/0´ã“NxXÐË&~Ê´5ï0´·¨-Þâž1Ã$’¡ÙA¾Ú–œüËüÄ¡ÓVÈ)©=ª+W“ëA‰†ž˜y ÜÄÕA]\E(ãîb{AWhÌkŸ´+ó}±!äöÊ•sXfqU®8z¢fȳfµzöƒ×@õp¦)XMe1ñr,Ÿª?ï¬7ªº[àéÕ§ž/v;:Œ]ÚSÔŸT^»’bòNØqËœŒz+ }´9ªSˆ ôo»àÜHÕ—p•q¿3Ý œhò—Nßd*q(¯šœ½û:/ì⃟³Ÿ(L„˳ò®eCµîŽ(ˆօ»^Gµ uÖ  šR0ÛÑÑïÑÚ¤LÝ|„£aåWìÔ÷uDôûe–η焨x vi¹ž:cÒªOÝ *îSOMÜiÒã[ÂWáJ÷CVmUuÕQ[ª…DY2ºËA  4ç Ô ÝGÖhŘݙeðv…è¨ýSì Þ’ËO]£3òI/+f¶XêqÞ­^¯!ëLü«|6µî+Aï½HuùK-ÄêÒõ¸T÷Dµ¡,ô¶/·¯ôocÖ¬»ÄZ¤¢~-hV+ÏMžâ&^*~ðÈîüñ†´åÖôÓm\Ô…§7OO?4[2sîΪjI‚ãgä… ŽޤÂlfŒƒsË)Ÿ_íM¸ z.ª¿4ž¨–³“ÇnŠQÏ cÌšøD½Œ¶•y‚ ­Ù[ÓjåJ…aé uš®²e-&(AnkcaµSíž/y‰œaë×}=•†,÷­+<2ûj3HÆŸK,:b[@¦uå‚Ú+ôžºM:1æòO^€Eî)ëÀ˜‘ÑYf×*­Ó3ŸrjvDÍ‹e0_¼]^CPêÉ)mI¤m€ÄÍùk©;[B¼à÷é†ì7fÅf Rë?ì(ñÊ$¬å«çݘ„K6ÞÕ¤ÀK(ä„ýŸÊ¶ÓšQö`¼+…7áꄼ…bœGgªšO¥ãìŽzƺ¼”ïË?}ð‡i™-°^oÒFŸi´ÞY ®wT|öDÅ¡.Õ.¹ÕE{?œ&×½‰¯7ä"aù«oÎ_çï²µa|]àf3îÙ¶›QȃÑß¡î:ã<¬wcP:aS.­7Ç' M<=Ðóå9÷cøk 'Žá8o>ÃÓZ,;tÏ9Çw®a_!:ÎaZûõÃ6DÊ’•:”«R@£j«–îˆ]›*Ù£…h:i²°©éI1zU(¦<¬3Ë‘;¤ž*“¶iñ­"vÙqh¿*JÞÜE;Özë¹53ñ¥Mtq¿ bZIsO8¥“u¾Y;=½UŽ/%'¬_ÝÅ*'Â&ŸuÑy ©IßJ~*Au!ëÒé)Þh¿ÁxÅí½‹¾4ÂázA*Þ*i¾YƒÛÕXi¡q<ŸÓåNOY`µ>0UŸ6‹Çû©Ÿ0 â–OTLÏ´Y»ªPWÎXOâP‹zÓ“Sš´¶²ÖðL36âô>áO2ˆ=[GdKÚkœø¶Ö¿~zD‹J˜@KŸ‘m5Ìaß¶}ãÉü>Ø•²Y¹uXWÎ+îaT²1J÷ƒî¾pljëPØQ57jŽô€¬ÃB–é–ÒM}öDµ•.r÷üñðS);º†Ï~x“•a—oj“wª½;âmù)e!/j‚—-•Wª‡¾‚%×(âJÛi ä)×–5;ºrƦÒÕÐGlkÐ9@xá» ç R5HéåãewÚóˆt'2SJtÞ¸tcœ4¼.ÐpõÂm-H´òð4çï| »¹ª±Ã*¢j}öåd«%'Â2înn Äl¨3E5˜Œ2>ç×Ï©J¥jžlýýé¼[äe…qÇÜÇ„M/¨‹¦´Ïg6'xTâŸUк¿¡×Ýo5¨g\I¯Ù¬.Ó¾ÍÅ^~¾î˜Lœ_´÷¬KØ &•ÛÍÙ³ÜDÔÊ®Q=ß8[¨)º*UA…ÓžÝÒ]h^˜´å*ð©î‡TµŠ¤0ËêõCAk m7©Ð¯d.Ç Tñœ8l;qÚ=~¨jÃuBôÅÒ¤ŠwûˆMfɪë«IW5V;P•ø™´äå…ä–ÙMq½J‘͙Ǣ‘hé›íÜ›u!_$6/á_îêi‹GIÝyÊJ)NešV˜Ž:R={ñ‡•=8 ó(ÀT)4îQ=1+£òrµ_Ç…Ô¼Uw¾&›eŠåïÐ1…N¥X$³£³,ñu®òQÞy#¬ÄÅ /ÝzâZÞTŸ •wCáµ 
)òÇño$wÝ,­z´ºJ·\pw”ßiS¥ wç ¾ÎºG“kYòž}¿Ö]äV•=`:éF–YæÔÑ»fI ³æ5Uþ2\g£Î%8ìÎ'’\+LêÊ |°+‡Ù­z«Œ„»’—åÝ£Û€+ïH#¾,û- b‹t×ßtYŒµ'òvÅ™+G¯Dóái`Ò¼mÞ>ýÃÁY*½0%<à õþ}PÓç§=´9aß ]ÃO”yòõD£ô¥@Ú1ü½ñ‡ŸiJ)iW•JS‰é‰‹¤g^~5z8¾Ø™‘bÒšm÷Þ-„ü›ª¦{hï„ù„ÉGnüp¬6èsåz½ú¡Â”•Ut)å`{¨ L% ¸œUŽ<`Oi¦PR“|b aN”› ñ=úöšm÷1Zð¯Žu¥}éÌ)­Ç«Dʃ·öo>¡œ72›Ër‰iSÙœ98Vh·m}‘¬z™wŠ{õG ‰¢Ê®€/nÏò†'õ® 9Žá»xï‰7®M\BŠÁÇ û!ùõ¬Ð›§v&»ñƒ¬ÃS‡j©Û쇦[˜j¨]­ÇñR„så >¤¥{÷ú«ãŒKÌ(½yy¿–pþ±!ô+nÞüº=°‰¥ƒB›ÁÛ˜‡Ÿ%h¡FôåwD”ÂÞø¾6uÉ?ŠŸ”*iÅ8éºh¬SÙãOå:†¦5L›ëÝCLFõ€žªÂT&´upŽ1&ñ"hêJxŠä’wõnß *p^Í8íØt!Óúº‚†úR5 Cz²®>îž|»àwågO|;á· %†°Ý^ÊûwwAÖ^¿N.Æ=C•.Ì=K»kÄušDÛ‹Bµh¨Uq¯ ÐçC·×—¾ø²å^SŠp$¼†Á¼j)·Bz«”L^YPN-§IYö34âPwg$)Î)ÁŽtï4U‡¦Z[“Hut&wªa5`9­sTÙ ÓñSò,âÔšu³+JMr¨¨Ã ¸å¶'tº^[+»iASÌx í‰Í+›q« Öµ*UÐ1ÙZGh2‡m RÔ›zÑíR•HYë ^îi¶Ënù÷–µ|å©C¸ó Í6Ú*««W>ÝÞùA›*™¥.žÞñ„[VƵWPååm½ØSßtp¥!À⪠®aUê½ÐäËó/!*-ïÿûQG²,I¹y I®3ª˜t‰j'­ REyÔ"mææ&.ð’ióV†­Ù·æ¸Ï(³µD)ãÝÖ»zÚ65ì7‡¿ª´]m7I¡é8³f8ÆaõÜh|£S\wb®è˜žvyû·Iç íÆ'¦T•\mJ*Ú|iHi/!°»…On4惾%¬Ù·¿0ÍôÛWáY°f&¦5ÎM%‘è{ðÖ:†5Hšú'Ø|=aŸŒW¯¸Ÿœ²µ­k®³p=¦Ô³6oí,¸¾•^ü5Dƒ‚ª¾Ðæ*Wà¬)m.RêOø…+±I î‡YKq1oø¹²h®èC>qÆ¿vÏ/o`+úQäßB‘/KjyŒUñ ¨ …u¿ZñxÀFÿóV”«•”yHÐöçáÒìÜVô]¯q¬M´âMÒšÄlKÌ8ÏÄ4_éê]‰™‹ÙW@> aéÅ)W^F­[¹_†ô>m«¨zªÝEméüâByÆß_F‚—P¼¹¥Ÿ˜vý^l43â©+¯ü²¨–šÂ·«‰ºñ¢¢ÎšÖLpe¨QU Üz°þpâõUº¬zð‰Wuۭ혗yµÉ'^ ˆÌã·¯i…N¶q¢vl¡‰iäLÍ•jÑžF´Ë`÷è‹oH\˜@I¢y¶Cö€EjÇn>ÚR,›yL:Vº]¦g›vÞh~ÕqãF•|úºéÛ–ÚèSœa®¢€ˆ‘´¸3oMº»‹UBPkz*;`O.ИâT°{ò‰É×®q«Í_e!—×"μü~íÝywı\Û¥O)!9â¡í¬ZXù 8Îòn÷*‡º&­9Y&õ\!犫ð‚`iM‘!R¶ÖñÏÝ#ðƒÝ:w(âï&IÃõWë3¦î¿æÛEÆ…6(WºÒÅ7[ÈŸùV%´œ:íçßOf#²o"kbZÝL=Õ¨ÊÔ¸QþQ$Ʊ½bÔ.oü³…-¶ê ±ÕXrubŒzˆÏ¦‘®ꆫ}äžêÖ'ßmkÕ­±xlÀ÷ŠˆâK¿Vªì·¦R¤ýÕ€®èá,N®ãbã;î‘ÝJ÷CÜñ¥÷å÷EýØò}äõËKƒÚÖ“*jQ“ú¹p ÏV®·®Qy`b¤3.Ë 6ËhJ[hq@ÿç¨Tu”ºS‡[ç§),åü%$Ú•/ó1ûœ¿»,=*+Ãÿ†²{kÝOdŽÑOC mö8rÔ›Þ"ÂËí¡^ 1Á嘗K}%k1ú·¡»T4nÍ,«¤(ö’=öÂ炜«K k}G†v,Ù×?j­ÚN}^¨rÒSÜd*ö=4ŽêÕ,Ñ{‰Ã´TDšæRÞ´¤êý*Wt10«õ¢»ÆÚ]\˜»^jôåZsoÙ O uÞENu$eO”xÁpÒùÈûõwBæÁh43ÅÊãÑH.aSÅMvìî¬"ÐS‹Õ¬¨Ø™µ (V´¦$vúáû]³©Ö¥’*kÑANøšµõ#V¥$+˜×>xM¢¶Fºø¯Òé=}q)9ÂxÏÕ)’UìǾ&Kîñ]¸+ó°í=2à¶Ò°¡°!^ S¼Ä»ü MN_iÇFÍ{$§•Çö™–~c޶:æYÃÿ&í4€Ú^—G6¹¿ñײ%íÙFÕÍJ…âN]gÃrjª+rÖ” ݬAõ˜3hx€«u’:pæè稂ü‚³µ›kiìû…|‡úfSŸõ¦ý±Á¬ExSjé})?yP›.ÃV.YÏ®)ß ØVêºÓ’Š^âã)üj£º?²Q¢E0 A=bõ:aý±Þ7'Ž|_l/F䥯!&™a¾ ˜bÄTÈ  ŸT -æÖ—¦âëb¿|D£³Èd%HºÏoö_®ÍÙÄÅ ¬¾W1¯NPõ¯0•\m²HÙï„7k†ÍÝhS¾þ®O|8¥¸ÊXã¹ÈçöuÒ&™~e›¼,6­Áµú“ë‹:Bfni©)™šyA 0ÒIZÔw Ã5(Ñ)H*Q-ä¦ÍKS–÷úBx„­RaG€0¬îŠ’“‚ŠÈiXÕ¥&”¡!)HJRR”€”Œ`0êT…|žÈ˜T´Â84Ô²^l`oQ1ÐË9ð˜“4ßðÁJc¥?"Ñ7]][®«þÐaV2Ãz¶RšëOr¨{¡”ÍÉrï+¤ƒë…OÌ!Wf)VáEÿÓ*ðƒKqÊÇ¢pûÄ]ï„OÊïepoLýÑÇû°ÕªÖ­²£õê)Û¾%m+ÓK@XÕ#”IºßÆ¡=@Ç veëÅa#ém`[“¼2—Úåf’-Ü¥Í÷ˆÎ°˜*,«W´ãŽü)ê‡-¢ùBSÌpË=Ðå²uAMw\PóÇ(øAïD¥‡²¦ê™(QEãSU) ö’3ìè…Úòò©*zÔ—Bò¥ûÄu"ñî…[ö[#‹>§¶ŸÕæ@§[Y×g\#I¤8JÄœ•©6[­ý[/¿i ê¬+J\˜¾v÷¢âÐÐíqÄCzKi:hô„»(­B¹G ÝQü"'mÛeå•¢Y§Œ9¯Š ÀA±-©ÍE©ðüÛS.à–ƒm&ðV„'¶‹0êjå±m©ïD¾Ÿ×{ãàIb»¯=09¨¸¥wUÝØÖB½-Â÷ÔŸÅCYV{Ÿ¹}?ñGø¡»&ËÿbOÛüãàë)Äßà•_׌3#d­ÄÙ¨(ßzÄ׺cÙr¼Ò¿‡Ù­þè6uŽâ®"Ye[µ£¼ªl+ùŸØ’Ü¿¤ ¼º£z¢E~—ç Ñ‹½ƒL4žg‡€T9¢Iågg¥òéWá$ÀÑiöÅe­¹›û•ãÂéj—U;g¬s§ó{Ö Ò™TÞT“ fœªzn¥eUú´æ‰½ RÝ Ú6`³\mĵBŽø—fyBÒ˜ÊTýQ½ÕòzîÂ-{IµqYÖ§ÖÀ?i`­9Åþ²™1©Ç}¯ÀWº5¨ÕëxÅÎl{Åb^hªRWX‹º¾]áBž£‰ê¬h̓héE¦›>B^‰EÕÎM­'ƒÉ°M ‹s’¥ª„4Ê [ªÈ%kFŽh­‘£ˆ—³Øë3΄ªni[Të”äÚ(„Š0¯úÇ¥yWÕxSŠF=ÇvÊlµ«I¼=hAëÈÒO'²ö€×¢_V÷ðÒßµÈï‹nÀ´ì‰»³N¶ÎñußúJYîƒÆ^©m‹þˆ¢¿ G|%-2­sˆ kÒ8ýÞWtN1d”Þu‡Nð—ø£àùVn¶øtî-¸?7>)Å•IéiÛw¾°Û­ª^eå¶Ãk_Å¥,¼åîo2…ÖDJÚŸÌs ‘ÓRF0Õ²ê¿øDÑî—Ú/SÕ Ú“KI2öÚ·U!ž·Ö“Ü`ZzD>.ÅQÝç¥SLvÕÜai‚¿sf§ìz— N–>Š»hÉ2œxÁ•(ôqB:¡û2ÓJo;lL­;Ó‡çÕ XïÚgç&>‹¡‰I†ôvÏe,²—Ѹ•x*‡º³åÝù#¬SÆ‘`™ë0OpeK%ÿEm‡;ÀR{âo^µ-3Ž!Jo–ŠS°cÕX¼¿áÔûa-Þu ¯ã\ä%>r½h¼‘ÖDZO™{>VIŒR×,eNÚWª°oŒýû >Tœá ­Î@ë$x‘ r⮫n¡>Óa7ˆîû«ÿ 8ò5e7gâ¤YלwT‘æùø½Ê¤LÊU7™QRwâÁCŽâê›ÞÏûi{º- "b^üÌ£‡ø“º²~î=д*^vi¶ÃrÎòuF÷P§ëÔ¶Ñ¢Ü ó%jü ÃM(£T›ìñ§®3m\l¦e[FjƒÆ4/Ém¹oÎ&nØrbNÊÁJp¢âG¡,ËR†Û…¤r•xÝmv]“gX²‰’³%Z”—N7[eª€YªÝp€ZÔ¥PZ?¨ZÖ+’9-‡>zJúM*“ÏDiΉ±cM^‘¿'4ERÓ­¬%iÊò)Õ-5^JȨ"µ>§åÞÔ<’”u/ð^‡xz›Õ<èJýš÷ŠŽø\šVƱ+<'ø$ýå½jx~ÖÍQ‰IÍkº©9d°ß÷—}fÁåäõ)”»»VUøR¨U®ÂUuº“ºå<@‰Iצ¦pVǤ|9]ÐüÈä<ߨ#Õ 
™J~U~ˆ+ü䮓ÆÜ1𨄖oTUç7]W)ß#³Ø} âWV éO¨ÅÀç*tPþÅŒLÄÖ¬µœ‹ÊJ+ÿ0¦t‡n.bjù¦³‘ÇÙ×HBC¼Œ{¼is(Ê­#‚¼ß,,)Ö€ z«ë×|_¨ÅHnU_¾MÞ’šwu*º¯Aæê‚NNÓ®Ö:æµL‹ž•SáZ÷D«ŠS7jŠÝ„4Õ\×úŸñ)ÿñš9÷!¶Vã×’’Òwà;« }KfêP ·^Hñ4‡Öeÿ¼éüá·^¼âR¾ øaùÕ‘Ý Ì&[Òwê,ÿÛ œB¸„­×þB­¤¤Ñ¼OÑPñÅ´‰TÞ-©ÁÎÓž+†’8™[¬4áVíSƒñ Aµ-9ñ¨[…ÁÄžô¨ŽøbÈZ@º‡^ë§´‰-HH˜šGžØÒŠU‡JIO|YÚj[ÎÙ’j-ƒuSKóR­ï«ÇŠ¢1â¢ú÷$Ɖù4²t|"fu-Úv–zÇYis¹†—ÊPþ3¢ñÍ o(ËýJÕ±ìËnURv¤›SlB\vÕé²êJ]e=¥¥T´Â< ù/˜°ÙzرR»FÍA¼ûPÍÈ6yN(à&%[ùnŠ:ÒHS¨Shqðâ5¯^u´£­'ð“ ²ÄÖI¹žQ3c¿û–¯}vÇŠ„G’õå2ã)ÞOá¬.q÷Wp•j·ãáŸt ¦‹×ƒ‰»¼šxÐĤÛHäLŽº§Ä~|ÍÍoüPÔÊR›Ì-2éÞ¥;UyÆ«øÕî>Ó‡|KN¸¾[¢wáüãá\Ö\]Íú—<.Wº}÷U¯B-zW>é]ÑÇçì>Èe- š§Y<ÎøTaÆ”^¼rîý[ž>¨C3)ø¶]?Q^Èeo6õç%œ ßqGÀ™˜Qœó KúWHûª¢¾ì-Å̱©u*dúa*'îƒ t©ëÎK8ýJÏpI0ú%—ÉpûóÇQfë-¬«uÒŸÅA KèF±®o¸|=ÐòæN¨K_5ÓàL/†9Ë•S=?„˜]›:ƒuU~’|oB¬9÷9lÿâ³ÿ™Nú*ûhÿ'F§ñ“”ê&Ñù¶q)ps§.Ü`XëF- éIõˆø!ÛõÖKÿÈ_²²ÖO} Fû•ì…ØºäÝr}ÁÌ’ û·¢ÌД’•ÉØó“u  Ô¸Ó¬XJé4‰]·œºTÔŒþõÛËé£ wñsa_“û*Rë“äÚO ‹¹PsøJ—ÿjIÚˆm¶ÙBZi´4ÚÔ6ÚB”Œ‚R0ÕV„¸•!iJд”- IZT(¤©&¡IP4 Š‹GɉN©nK·7f©xÝ”~ó!YÔ52—®˜…%$¤ "gÈÕ¤Û‡€[R¯3³…2ëŽbס_Jòkè¦-&ºWg$­‰V­$ øÂTàÈüSÚ—k•į|9gZì*äå“5.w?*¶ëLp¼T;/&TC²% ÛT|!ËÀx^TššÉ)öDÎÙ4‹‚¹…og~½ ±Ë7€vÿûÏqïHFŒÈ²ÍÖ×(œ‹Í§½kH=°ÖÊ”_qćw^J»Á¤3d³2Hvã'é"¡P̈KºúŽt{R Ë0Òõ|M^úaÙBarŒ Um¡#y(={ôA”d$,²oWŽP%¯ éS7>®^=ÐØkæyϳ, <¢–×.NìøéVD¹ª¸Õþª}¾G|)›=ŠìªŽøZJo•KÜÞkØ*{¡-È(^ KÓ–ŠžøR¤)Mƒ³Š{pÆ ¨Ö\«%šò®Ó Ô§ª Éø¶’÷U)ö©ãŠ)m°³¸6M9ñy·Æ©N`‰E©Êú ÿ y‚`aë»õjð»X³„^ànSè¤lÝXfÀµÝÅ»>ecxGýjӰľ‹ÛKE šúIô®'¶òÇ| Ò'ñu2lô©7I†<žÎ-C…O2Ò>V¥*q}¡ 뮌Kù>°¡xMM±Ù…!ú,†ÏÞ1%bYìvt«Ó %N—WyÃÖ¯ëÏÈHÌþÑ'*ÿûæsñ$ÄÖ‡èÔá¬Å‘*£ó5ŒŽÆVÝAtP м˜ÿ·´EÝMÔÈ)ŸœÌÃà¶µ¤õƒÉ~ŒŒ¸xÿêÿ“ òa£Å$!É䄼…Ó«T0ïç‡<™YëEÑ<ê±igÍl'ÉœºMï…'ÿÛüøsÉ“+ÿâÎþ”çÃ^LlÚRjÑ™°Û#¿\|!>O,!ûÛ@ô¾ßþD'ÖF¯õÀ?ߦ¿ô©Ýú£l GçzaÓÜÙlwGù£ Þ[`ïÍÿž?È¥> o§]3^Ýu`èvÝ6[Ds¸ÿ޶±þEèÑ6bÝ®™ÿΡú62²¥û]ÿÌ¢8–ÏÛÇ[Xÿ#´kÿÊÙÿ˜ÿþl1aXòâYÒƒL¥Åu©ËÊ=°Ô´»ë,2Ê}šCc± Eá„PúúïÿÄ\ !1"A2Qa#BRq‘¡b±3r‚ÁÑð $@CP’¢áñSc²04Ò%st”£ÂÔâ&6Tdƒ„“DU`•¤ÃòÿÚ?þ©âcœù?€Æçè?0S5錆]ÆëW5@¥CˆÊú[#˜11G1N¡‚“ˆoHeWô« üN%}!’o†jþ<ßò_úÕüxš5èÕôª¥OúIÆÿÚ\tVr.ä۶놫–zZüÌ‘HU‰ì/¬,5ïÛ¿cÓ9³]Ïi_‡õ╠ϥsõ¨"Ö­Q[7˜2(Òf:q;ñ^¥ä&æ˜êvö±ÉZ ;ÞîÓ´0]«V3ï5ß³ y¸+_Õ˜ýÖÔ`¹w¶ùs µê!ÕÍ?ålHÏfÔKŒÍNo9ç%óÙÖ¨«¨sUéŸáŠTéÿ´¾“àƒ¢Õ®s :§_Н ê yœz>™©‘ô…<Р*5l· Õª5­Í—«Lr5¤{öÃåÞ‘£™§¸›é8÷©Ô欠½¿ör¯é~Ì6kmE‡ÜYÃZš§ÆÜ=5ñßÃ|f©ÖuZKè¿Hiæç J~l[ìµQ/B·=—íy¦¾8?®[ÇÃ~þá‰FÇNÐÆÝÃ_œn¾ÉÁ*5>{b›7*’ËhÛVŠÓ/Q1~ÙæûتˆY‘o‹·ÿg\»ó~Ì2 µ+Ç-Æ$Ÿw»Çl5S7iõß3LÅJG‡]X%Ú¡¥5Ôècý&”iPlÿ£ò•j-!j5Z*2îÑ÷š™' ãtŽ5ðŸîàúq !´,ÿÁ¯–%£¯ÌvvÀÐê'S+á‡R_Ü1—§ÙLÏAFýŽ»Nr…7m«Ä1í{˜­J³q*¢µÕ=â9OöwÂïÙ†ÎñÇVQeóîÄý~¸tëÝ:ã3]š"M¾÷&2Šu(Z¢F‹{·yÌz/°?iòÆ£¯ãfèn^šø`>º1tbé žÇwLöÚuö§§†(T%´1³o\dÒAS/A£}7‰îÛ°ºˆ¨.;ÍàþÙлëû0{|3~¾ÅÛ®ë×{¿v33ÚhùŸâpaTªR£§éQ‰0:¹|1·º"{ºéƒäGkVñìõß =v'›÷aµ0»wÇñ¦íŸlÛã-N+Õ:ûzƒÞ4ÅtG,-3:ÁþζŸní<ÇŽT©™ûBSkJñ5¬V(1¯y ÚkãŠôŽeiWYdö—Ýîõo ´Ê•ê¡Q òƒãŽjäycó³ðýq&¨ù ±ùÅ ~uOÇÖ€#yÐà[To¡Æ)šUTÉ´ µ&b{°Ôó ]‹@¢ÉjŸu|°õeÙHng¶íºÛãýlÇQŒÕ å Õâ´Ùejˆ% ÄDïÅjÔ✛¢’Å5ðQ3е¸Ž 5ÚQâ»mæmœÜ¿1þxÞ~8w~ï'qóÀÔÁßMñ½Ñ¶¼£Ë|)ƒ62y›_³×J¡Yš «N£m±ö‘¨‚®Ê¬þœïïQýžíQV󗨑îÿÿ_\4òý0ýÎWM‘š,btøá¾óób7ñoþ\§¦Ÿüß‘tøÿ|±:wŠ$ž]&t‰ÛpÎ\Âô€0Þ_·û}øÔü|q…×àGá¿Ó ;ó³uæ›±L´@´œ!W]rÑçlGÏCû>Ê´ê´åÃÍ11Îã_–+µ‹‹¸A¸°ôóÉÃ~Pzµ'Ö>$ßÃ÷yc”´êzí€K0Ç_‘Æ—ŸŽó¾ülÀós||0$ÿáÅ>-ÖrøL|qM²ËýRÇ+\¯ÖNÄb|öÿgš”øriC^´»þ8ÌSô†[/šJÜÜC'€:F3•r5Tåø¾¦üí5(t=û§´NÚ<{[x`J(:ÁÛá¾=ŸƒÁìùãK$~XÕR[ðÂè£é€SýÜ$V ~<¿â–O‰N­¶óÑF0|V1oáýž”›²ð™8ãåæ…i"ÛŒ8 qúŽ"–¹Rµfbn'‡J«uŽƒTrÖ€²}68kš~8ïÿ«IîßCÈÚºNØÜøoøa=aýl\¯ú^3…ªB¨J5Zuu0ü°‰F˜¦)€LxïçøÿgÑdß®‘EE¦ róI"Ùùb³ƒ?ÍjRM6â‡SþÆÄÎúc•K@é×W´aIØFƒ»Aýžž ?álU¨vJ øN,rS'âVæúãiÆØñû¸Ðc_¦>“†£¨ú팭ºXé' ÞJªÏyú`ãaýž>?°ã0F쪃âÎ1·VÝ1ø`ãSˆÆ˜òü›bø¸OŸLS6×Nºxa/¬)Û jm#wGáý¡AGµ¡>+NêÿF'mþ¸ñüšÉ¨ÆØâU§êÓ›k» özVâZ>x–E'¾9±å‚?´)…Ü Å_4£Ãâª>mDbiP«Sôi±úÄbFF²ø’ƒñlsdê°ûOÿ9rnŸ‰ÿ¼qëkÑË/r=Oïµ£/®ÌŠ%ªÕè&>¤à‡J Ct*u_Ã| ¤o]åvùþüy`ÿhW”Sué핟úq­E¦;Æ­€Ä5géd~®4à!WéŽÿŸïÆÿXý¿––]uZÏë`6âÚcÑÈ„ ©ØSêcñÅd2Œ™‡“Ïdkû0>?ÚþZW:énó§LM:u+®ò¶ú£/¢3.Ý&½ßÅ£ŸÑéE|sùsø‹š…7Òy3TŽžX¿ƒ“§LvÆcÒ •OþlSG\A«èäûßmf_ï.\¯×ÐÌdk~s[ê¶éŠY®>[…NÀ´­ªk¤èc§S+d 
s¹Ý=›œ$Ø2±ÚEöa^% ¢Á‘Tng¨'¶Õ~^z÷ïß©°JÔƒ!Lô:ÅK˜CZ£dAxà&F‰! &ˆb¼Ä`H:°t®ñ÷]+ÞÈyèh–™ÇÆÁü?GÖ—#TJÒö[Â뢀 ©òTy2ª¤8{P¢”Ñ÷”T@ë+|Ò‰•EO…•‹À–e0¸[Ä‚Ü lèaÃ8d¨*­ïU¶9»7’@z D%aÑpÓ QJÔQ[ø Æ7°«ÔóË[òýV§ ‰‰¥[ùŒòôpß(D&ØOºðwɯڈ°PVP/š"VX€ö™r) íéõØÌ=ïEÛô¾k»þ%×Àbæ [¼×«ÅÜ¡ˆ?Ì^Â"– ŠÔC¿[þÍDÆ}øýùÖsð¾½\˜ñ‰Ù ¯¶q‚úÈà6¬{…ƒ=÷§ç¤åŠ8Xºäq;|ì+D²ƒégov`0p.€IRMÙI+B˚تv‰ø^ðù‚¦žŠ5³ã‹j¥Ë`ÑV¸tÀ…í2we§º xÖEH…´ ‡LE¥:åf SwGA=Dòqv%Ñ]ÅðÀ¾8ž'¼GŸ•f@Ø·‘+a˜V.4ô`âëÉÉB4AH·^ …¥(j<³¢"(ÞÙ8rÛ¤¨ÁXÁÔã8 o¢MÔ*™ÕûÎS’Œ"xBW=Fv\8•VY…$¡ˆQèr¦ ŒùVÕ(Wyßq¡\Põ¨;¾½cxW¥ ªìÔ!5Œ•8ÌN‰Cµ2ØE‡<(PPÞ)‚Œ63²é’Åø’¥Kà¬ÍÃÚ% &4õÈú‡Ž+¼|„³œHsi°)HyQƒ]ÛÜKÙØ~ç̼i>Ð_Î<ÓgéÅ‚ýOsåþymú@ûôý¼x÷ŬÀKw•Ö-uÀZLV:Uâ©ÒÏ¢Îwìçi)­©×éo3Âj$ÿ–ö¯U ¥†„Å’‚=ïû&Š˜Y^ukù^Ó× Z*îëHù &€Ø^ÁÒÜ”®2˜°Â¿¤/ý¯ î~Oï~ÿŠ+‡ÚDý?ã×*©ìöŸÏ^;WécÅE3ÒïÉ ù¬ýU€h]Bz!«Æ‚!ÅòI’÷"ƒAÑŠ•†›T÷“®+«Eußåøo‡ÓBz$t³%Œ©Ù+ }üsÁŸ“ø|s“~M%'ؤ<<÷Öˆ=o¾ñøyÕÈ£9Z5IÄýË¢DÍD¼”+;s»çE(¥ˆk  °…FÎÖE¬„RD¦«ÄÈŒ£APdÌl}{–k [!ou³zÞ€{ÔznÞx"é£ À^ êÛ4$= æè—䀶aøvsráh˜lB –=ãÄ)¨¨ADéb/}}ç÷hpÂD´- áFXm¡$ÂDÅŒ+ ZB[©ƒy"eQ@*Ô‘ï¶Âg7.Õ*Ï,Ùãùcí2Jt› ´!=;AJNP¥ªÃì¥#ÏkÀ$0¡èEÙw®6 `iAý€vµ…âT¯°ƒÐšuT%p©MàRÃT;£†W¦Á «lhQâ·™V©`0 P„P„,6|:í§?KËùúzg+÷"ýCÿÕøîþOöùÚ÷úõÂõOßß«ïˆ"ª»ÃB2SäœgŒQ\Ah”ƒ8awdŒ'º KÞ²Ù€«z´ß>V 9鯎øÿ«ý’q7¥Óë|ñÓüÕ?··™[åù>¦þœ}gÇÖ€ÓÈ=ËÂ÷’Y(`ƒ¾CÛ ¼* °©MA,H])Û9J€-WCº½Ñ¥ï„9 a—;\×"m†[E„„”v®t 8ã~m'WŽ«ŠlÐ óÉ^ó“jI»Ð'=<ŠÄY–‰ÜXÇ^ùw@óØÔ躳8³€Eè´`»3…ù¬6•KXp¼DB†Îâ§xö{(ƒPß@*ÂáŠh¡@íDwcTp¤ÌEMËôÞ ¨Ê=€ ²8Ø“«Äå‡-QéC‡Ä–ÅcÍSGÉHÑ{Eù?~[-¤`¥þÏKxhBÖ kJð\9z„탬Ê5„²·ÈÅÕ4 ¯LCiÇ‘%‚…ð@X8¼31t >·ˆ¸ ‚ ½‹[ƽ™ ‚H´×ÏŸD4>,½¿¿žâ<©>õ¿Ëúó£~©WÓú}r_àûqëòH¿_, ÐÍÇT/¾ Bd˜P&â|jѧ~ O.CøŒ3‘&ïÜýÿVüs´|Øù¨ÎοÙ< ú¿ÙÉ/èþÉOÛãß“;z}~ÏL¼Ë KQ틹Š_<&ì¸JÑVˆ hÕh”¬¨”D:‡ð”EJºZ$ª™;äG°³1Á˜“ޤ³£4<_§/N¿l+×—øäh@‰$õß¿)Â1D”¤‡­÷ÊuÑ$øS¨¢ Ç&¼t‘u)%0ÔAˆÂˆx»ÝâZI×X°ÌòÆWÁtJ ÎÔ;¢Dn1Δtú€£ãÐwHmˆƒSD0­y¿,µ ‹Q_‡µ¦œMX_)³ƒL¤'q;"Љ%D {‘á•Ú“@(†A#ID vŒ–okVý& |j’ç¾íã üÒ$¢ŠÂð–ð‚RĸN÷fªøáÕ4Ù¶«„ê$)‹œ²"D•´ˆ-pZˆJ-±–½Ê‘£™~@΋V¯€<Âóýü1/º¾÷Ï>ts'Ïôóι}éûÛ÷Î2H­ŒGÏ?ŸÈÞ9öPP¨tôZöDD†”#®xœ¥€º¨Eµ¡ÐÊw_ñ¼ä wôÝsr•ë´ÍŽI»þȹÿaþ¼Aûoäêðo@è&ÎûpÕ€)ˆ*¯Ia^7‚ªÄÄeZR‰APF!(Ò/HðK‹iª1LÄ Fœ¯V R¤T[§Â0åÿ¸ì ‰ì±ê\çô‚ÿçôÞtáù¿…Îóñòuëï—(éEOp%¡© ƒªÝ.ù üpy®‚%É@°ì!+ÝOaqùâÌÐDB*‘ ÑÙ¥âS'Kb1÷‰Þ›Ã"ïXAU “ªpEÎÄéò¯O2Æðt±gƒÁ‹í+‰ˆ!CÀ x("!K;]%@\´a&X€_b‰S«šgVÊVböâÖÙ]m#Â[!#CÔu¯ÃÀ)zÝ )ÞȰo€ù±¥é¨Eùe8)D¹iB ¾F¢,I EdoÉ…årĨxÚÊýÎ8'¢ Ý0 ‘„¿7ÎÃ>xù¿g\ýéß¿ ÷?þ$ý×åî|s¨ÍòŸà}¬aÑŠ 5×ã¹ÃEüN)"RBìš´å%“¿è9þS“ÅG¢7ßâáWŸA½ó¿ñ ¨ÇâEÿc:l°Ü^Ó€lìå=žlýù$ï)ÛLftëÈúÀÑ3 Ú!‚¸GIbŠºZ+¤òÑ&ÂCÙ‘ðs¡ICÔ0yè} ƒQŽ“æ¤°€Ä¼Ó«QŒ0®½…ìóÆoÒ€eº"^š£•ñÆ£ *)#=­Jqˆa¡4VižHZ¡ËÑCŒDb°La9 é=½>㌯='æqýƒµï²~>;Ú"cÛÖÂôø½ï í–|•þ§Åãt’ñ%‘ÖÕÒ ·'°Q¾À¸a¸¤(r{úèJ»CÒÇs¦$±tð€2$ŸKÒ×£ú/ñóÀ_Rœ·º×ý“­òð¼quñå×ôp){oOÏ™õùg«à­2‚mDí áŠE0aÔ˜Hd“Š$ÖµhÁé •N4ŸAiÜ)$.`@½X?4~võ¾…¢„{Ô#oÌ,¨tªÏƒ"AðQU +XR&h(S†(Ãuz>HüÞJp `…ï(¯Øi•´Ð`Ìá³+eEPq¶šx¼tnTz)ázë£ÑÁ×´ˆŠE‚s̼nN„½M›+yô>@è™î9Ú˨#UµJ&“V›»PYÄ’¡c ‚7ºaáJ’X…T«Q½Ÿ//{T i…C¹ž»âsbN‹A:[¤uÃenŠªá8 hµš¡¦x~œsRô4¯šÛçÑ\½³¤p¼=Ÿxot(PîÆÀl ß<¼XÃtq½ãBž\»áƒ)]Ú§¸¢æÁ ¡àz¡sΧg3$")òÜâÞZq‰ò‡×ü7!£øÖsæ תò$0%#€e…Yg®¤‚  ªS'L˜Æ¤¶=Pï€J5Xµ‰ñH‰#ÜÖõç³#~ä.ý<òƒßPxW“s¹þÅQôs¾]ãN¯°âhíu÷C׿ëV×¢°²ïwï„¶õºÞ£¿Ù³’u¢Á,:¬š«‚þ“vÀP‡Í€±Ñ açÇyÌå+['pDO~CîàÄ‘ õ”ʳ¹wA0@X` *5hqˆ:0`ëƒAQìñ¨*öj*¤iÒÁ^Ë4•/mœ ÞÀ¤Ÿ^žü9š‹tØ5èWLˆ„G‘*ÅX®;¬^åî 0ÚÅP¬PômÔ#%}±,膺ÿ€à(@ˆ¥BŠ Š´é4Ðá ÑU‡ÈøJÌÞ2CŒÅ«f’ù~ì@{¶<¼ È9’šŠS¥{§Ÿ+"Chà-èËaÃêÆ”m‡WL |=ñÈ/¿)&‹ï|n=KYt ‰ÚË¡ÕîÄ2"©h-‹þNcÜôt<âÈ«£ª‰g)×n78 a"š”i@¿(3}„ž.zFDZ /šðTÖà À„ÚŠÙ á D0Öµ€‘Xx/cç)ÂÜ%±jXPZ 5ȃqêuÙN×QSI߬çÍw¤ÈŸ„úà+Њ DÃÌ$ÿcáý¿ÐÿKyúàxÏŸÙã”T5ÓÐkñy;½î†îš1ÃB„(VÑNÀ»ÊÁ¢ª*@1ê옪q%bº±‚ÜdÞ[cè­c@­3¦ ˆ7rõì)¡ÈáY°GGÇÏ熣&ÙaøÂ×:ƒÚd¢’¤G¤á)¨E­ìAÜ`¢ãc2 ý“×%ú†¢¬ÑöéÓ„>—KÝó¨74éེ9TGjÊ¿»leÊËÈQë;üq)!B˜R,ì) ¦oçû Jÿ‡§ösºØÚΧ¨ºü÷İ^ýÊtÿwbxµÜ#!Ú:÷É…ÖÍp¨ÝsmD€ÑBËä'g!¨ŠÓá#â.8$Öô‚„‚ôéf/+ K— Ù>P’ñ›„† ®Ã V2ù€mý3ƒ”‘,@6j¸u;ÔÑ( „¯_ m=ðT*²a5~zéJC=¸JÅh.påETóáF&;„4âŽÈP ÀœYdQ mYqUC8[’Ð6°I®­ØƒÆúÔ¯‚y_‘>^è¿ “å ~\HG •»øR8ïuhF=®wšwǃqµh&D>ì<ìÈ+ Tã ½œ¸vD´ü–¥1è$ @²Ñµ‰°N:Â=X´³ØoÚÁéñÂ@n 
‘eÉå×”ˆR‰g¶QÐcû+Quk®¥BxÆ æð QQÙà H—©xÐ6'…öŽvšÊyº’MÅŸž¹=´;|°*Ì\£Df“Þ»è·#Œ€ú_‘8‘ò“mmUR°(ŠÖ" ¯bå€ib9_¶ê±{p_DúVCBpjìÚèxž(æèú4·®Mø)Ü~~x*m–~dŸ‘ÿêÞ½›þŸîôÃ/Ø|ùîyêr2w⿤åï…Ÿ´>¡…¿]rÇç óägîÞ7  É!å€"@ñ@ó‘ F ‹³/¯µ‰a³TMѯK¾ÕHˆè™: *EaèmáH+¢XÅR·«ÊcÔB†¢ È›2/<¨j$¦QÕîšéz8Ûš¦žùžiGÔ¼Ò›’CÁ)0~.ÂLµÈ õËJT‹÷]'ï£3„]%Òã£ñzóß7þ— †ÒØ` C¡äAp-§ \<âø3)¬ïŽÎOpí¾ AE„Px:G@@êÃÊnhù7vÆžÛµÝôoŽ(A³¡¥ëQŸ(1”¯EH«ce»ð±³ƒ¨=¾{`móÅÀ¦£GV ]ïÏz¬ ijB²´( Mxœ€í5A Ó‡9Ñ$ØÊY7 7¾öeh¨7ðÈ%"%|¦†0úâ¶AÓ m%!4šrQˆ…U=©9( B¿±iÈ0€Hí*Òø•yçÈ ‚b[¦õö¡¼`b„‚ÖÃRÚCJPBª…Œ¾m$AÏ·ç@áÆº=°Dò¢ßFÍ  r}t:®U`—á1ÕR†Z€€ HwOþª¯Óÿû/®–þjøà‹çyöó̵’ë¬qÎç»Ï£¨"îYSeÇ®\A ,îF'£ÓŒ°ÛA‚ñòÞ4Áª‚£‚‚ED !ÐÂÛMBµˆÆÁB˜6À–ˆ$y°L "ÀH1Þ„FV•TíÀ/ŒR˜MìÜïT»ò? ³:t”Ø()®øP¤²µ-)·UCÅsÐ$[PJ€¥uÂSAœ4 v^Ž’œ°³­ðÍÿÎÛj_aú‡ÏÍ5†ö¡¶LóÜ‚sпÇòG=D%ü:ï•ü+Ç•±Ó±˜OVo'ª,hZÐêåRÖVÁQTA· U€dáÐ'[ÐRŠ1}C†"ÒÝ6B`ƒUÀÄ€ ¶JÌÚoöÂ’D)°ÞŠ©;˜ ´ÎÈ#ì‚Òg‹E¢ö˜S4RÖ°‰Ù.Â@Ç·b ÔÐç(k€’Ÿ&cÃÚ¨™…T iЧžÚZ€R‹H”-éÊòIq£æA PÁIǬˆzÆÑýo/N¡üÇ#NË#„@l¿èš>¯é?¼ÿâô=Ïø¯ÅûŸß›X=ÞÞóéâ‡×@^Ø/Ô™<<ø:æ?þyóŽþÊbÏ<»ªþ ÆÑw8ÑlŠ`ó›µ>‰œÒìêö·¾nQ L ,$HÀáðt—ФÇê:Z t8¦N,DØá•‚zUëóÈ5 ù¦¨ C@—‘H,JÇ !¤ SˆQÚU´éCjÎ3´>mt>E7½áj¥2= Í+dà!’g|ÞŽ³âè,H@ ((™ÕB6™¥,]J…šb€H"Ãôà‹9(VÂ?ŸÑ M”i¨ãy÷ouÄ~oˆ𵋮ëÑqÅ;f „ ­jª½0n]|gÓ<½ׂ ½7©{õÍ¡ÊÉ‹f'fðRôWâÊŠGÁÒŠZd­]ªÁDHqj†Å! ÔÀuIrTŠ8‚Î¥*.)}P|ã]ÌáB¹ ðlµ"XÁ"%W@1Ê" ‘GÉ×¾*Â×`¥0Ž’•QulÇmá ðvð/êL Ôœ„RQ h_!$¤8ÔÊFÅNA€,˜ŒBz–†¡ I¬qx'©ÁW=¸úóÌî„°ô=å¥^~ž¢¾ÝâäÉ„…aK¾0ÉÿÍ ÏÜþÿþÏX{Ì ð¹báâ1™¿¿ÓcÀaSuü3£#2ðZ\4„m”VT.Âó¨V£D¢!‘ŠÀ¥j°ºcìL[5× Š«¬(Û§œtÁ“ê2‘4:!3¼"ÎS~ $¬Z=óÍâ°ƒM H| ŒTz?t¾:@4‘wØ^JM­ k°¶·ªp!ÀP¾%ÜŒ0¶8 ‘• La¨ÃxȺDŠ%\¯^©Ù¨—²#xfÑ%,‰bå(Ñ€¸Õ‚ì¢<šÐ½â”ð«8£!ŽªøH"åŠò†^Ò”`è«áO†±ŠÁ}¿Ækµ~Cr:p±ÝF˜)¶üº³BtêZ¢þŽþ¹ëÍ ~Â;LžE' ¦—êÈg4Ðx-~\ìéÒjšñDTñåe(ãlë†\¯ê§nŸ”Nt;&üOå²øä8í ;ñ4ïÌÎìå©Z™ÈŒR{G88Üwž“Jf%¯ÍĽ4 ©x ÄŽXÆWT­ì à×4O(+g¼tƒ¹Æ 6,·X0ƒ ÜÈiŒ›H‰ ‘4%‹²‡WH²á^˰UX÷àgIƨ(7$U €£zàq¥!$¢R){¼—cC¡F„`ŒÞ%Lüž¦¦QÇ’t!@zlø¼u“â–:ñÞ)F¢‡KÈAôÿõ”aýƒû8ƹ ä wæwk‡Î´€õÿo™°ÁU†`ÞY¸…²*/˜¨<"}Bª4`„t*Á” ŽÁ¡“V™ß|;!J®Î µ q…ß9ˆÐüú¹" ij”—;cç„ Ð¢#{3veä(jŒêOh1 *ppE†˜—´ÕëN÷&\A_-½¯ÒQ®ïÓ'_~¡ÇÒ£(¦‹IàT_ey¥bC%a271à­P<‹+ù廸!䤗•|tó¹M@TÊüŒ«%˜® ¡ØLÄVogÊzdtt!Ù9Y°lÞê ¾Ÿ\ìüËÛHy—)× cF,fö0ï¿éÍ`˜Í!Ç0¹ о$ÙÑA>î® «‰uºÏ0§¢ó[—Ïöòš3é(Ðí6.u?ž¡ÇXFP#j‘ëäñ×>òàKz_ƒ´ „¢»fôŠå0hdï¡g˜'ñÉÛ e‡ÆD²ðù õ]4Z‰8"‡×jê :AÀ¬€DÙP[ Š¿¨CD@ÀýÄ)vgÞNÊr©£ˆó!¡b ( (´»ô2>4gU“4ÖPY„4ùë•ïè!“¡ž}ZÁ}ìŸß øyæïΧþpxyì·¿WR't½þîϽ ‘X£Qðñù¿gûª»¶½×Ñï}s½sPý Sì·Zð2ñi´ÞÍ_í c*¾v!".ĊÀƒ@·²ÅWÇ0Aª©¨Å ¡àãÃ¥bô cÜ…th(‰—Ý®„´iÀf¡@Tà¬Z x<'º#ÑM#Ã{ ÁM«k7 ¼î®ÚÊ@ H·£žêD¬ ˆØ£·¯žN…„QCOA4Of+Šˆ"èl ‘A áHtA+'Ǧ•¢©P2Nˆ°ŠŠcWÈã° s>W¾*”ˆ8(†…EÈ ¨€Í5fSŠe ˆ!·«T\( hîVö pΡ˜{ LA — p·D4MM[.•‡·@Á<è;o|Ýe)Šª‡i 'øåµë-jø/êg¿˜÷ª‚œéó‡|Mv€_Mö=ÄÕRÂêcë!gzóïOw³°óŽòR«EóŽyù‹ŠD×d(-ÒiæŸ6ˆJ‚Mêà%k+gâ‹HkX€©ÙÄVÉI_À(H,)Zt„½H°ÅñêHU\E¤•^<ˆRèç$Tzwdì@ ØId à‚EA¡âƒ Ú‚Á{uæÎ.ñÜ[@J®Ã­ã1@®ØÏ¥¡Æ'}¤ëá.úÿºûŒ‘‡Ix=èS<'¦>~ò›ô(ýö½¾3s«ƒ‹OÐÍ%Ï à‰FމÒ{ÿãñ~ç÷ÿJ¨ö>~?³‘Ÿá Ó(+ÇÇŸ|ƒÑ2o^|/BÀ(´7$ êqº‡+µ`†>öЗÎÜËNë2k a ŒH‰ ¿‡mºš †± ,×`’='!Á† ‚fÒäú"ᓲÄ“Á"Ž´CŘ0–;2 Ô IÒ)IªBçG¾¹\•Ä ðRiàä< +”}|}UᆹJjôÒš—,Cy†µ¾ˆ«™ì^=±ö•!ÁøQŸ>MV¢ÔMIG%‹­¦«[P¶±fƒe€G7P¨!X¼žEqbY^…— *ø=jÏ$S1]Çœz8¬õd G‹Š”íß *‚X€„К͂" mBN×IÍŒdc£‡°¼„ÈаX£¦€°«{VÚÈ+˜¢`v}W.á®ʳ z°õÌÌÔ ­ T¸<5@l¥AÓ”§ä‚ ÑC οg˜Ð¡ õ[Ĥunª®ˆËH`ØP‰€¹Ø²ÐÍŸ §*£7ž@Lõf)R®Ü§0_@J-W†ï&HT •_0ÀŒ†b_G´ià!¬j*–Þ%k8û%ø®å%0ãÜ,j u›Ú‹à?ÛŸ¢vß~!ƒ«¡÷†z}r[ÂwènßÿXD?ÝöÏÒ;yù¢¥]ð¯eW³Ø€5¿¯¿ü§ÐýÿŽH_1Ç|ƒ«ƒÖ"r»ÙõÇÅHJjE3DXò-bhuY 3Bõý /½† ÅÆmMM÷ÅM1à÷‘ÐbçiIÑÀÅÎZ«i*;‰Ûáj.ÈÂa¿-ëž@ ”04Tlª †ª¢Ê™Bå¤,¦åLÍ'7€ èÈú¨Qt†0 0¨LWŠŒ¤£J€>Ým h:­zѪúkòcÍ:5×:o=Zƒ@¢QŸTä*¢"&‰öØHøä‹ZÃÇ‚üRÝ—‚Ñ (åÑ”û´8#!b7Qö2|ö'aUë *õ{ê$8žÇVÊ5Kò¼D%¦‘Q wêr  ´X‘ÏC†GĘEÞZ¬p8pGG‘£l:B i@Ä|Bt…[æÈ0V1]4:RvŠ Äì : '•€Â½e· 6²¼ @”wµuNÄ{vœ€B‰þнi2Ä;a F|‚´WLk;‚Åà0嬓F{  lP±AQÑ-UJÔ.Ý@vÖ½´Ã|q½1NPÈösa@„šPºjyPNÕŠ‚PËOTé”J!¬o(`褨MSîüÿyóÏj>?‡ÆÐ÷ÌÍóÎñÇ¿üÇ›Ç5övsÃß–=‘±Ó=}ûãº4«‹C(žšÈ2@8¯þ5‚pØtöx>O\“‡ë6h;»?¸E²ý·ñëÏ2=s¯U·Ë'W€Ö„x¸ °eáÀ­è(:M4 ˆ@ F D@Q5ŒúŒúhu^ù­ÅBï>eí›ÚÚ×'Gà믟ےŽìT¿?B~œ $!aB' 
/s€nãiè:V*Íã)¦H© ¦—·}­ æP²è‘Wƒ= aEÆè˜Tvˆ\ ïåaS¤Fi ðnÝ+çÃÍˉÐÔJh˜š öH³Ô©•¾Y‹¢ÎˆUg—¥‘ã±wJ­¡Ë"Üò¼rª„Q¯†%1Ðc¬Œ¢0RSë×|u2¾²¡E°ØÇ–fÀê·¡ ÙßhC˜ÌªAG £éG¾:2€ ”!RÚŒW§”Y( ²âÇ%µPÞñ]…,‡Ò½*sÐJ“bjî‘ðwÅí`7 †“°$›lg0SÉd :Ì7ˆ' !Þ"Yduà9`Ä<¨­&¦ÈUÆŒèèŒÇs{6eéVŽˆw¤1êp‰qÁK†ÎÝ`ø‹A¥”4 –‰²ŠÙ”* €@ö3|y°ˆAáH™‡!`£§Ç]¸¿Ç„¨%‘!E0A䮡ä+¯`FN¡VïÁ ï̦þ7Ãγ>¼ÿiëãëŸB.¾ºŸO§<ýCÛßüËhšÓùþü¹ÖtÍLjŸûÖ*ì&øÉD‰È¨:ºi™xãÁ‰‘ˆ @"û!ÇËŸ³ÛÆÌ^s ¿÷oìMȨønïO*òï?OpLÈO{‚åu€0®°,pÌʨ!¢š¢´Õq€£°:‡•.à&‰¨‹ÃdoÈ!çP7ï<\å eL?n¿ný<¦twÇë×ëûñ©ÐCO1Œ¥½Î{è$´!mpìM©ð[5ObV]û‰ ;eQçŒ_h¿ÑÇr>x{ƒ{á· Ž)Lš>Æh/’I‚z¾==Äá@¸A‹WÀy•ëÇ”•D4‹q$n‚RÓŽÅA†´ì÷:L íÆìï±Î X bVÀ0Ž2gg\‰qP,íK§²ƒ3ˆ%®‚ÝI|uv›Æ§T p4lÎ.¶®°~‹óÁ™EP‰§£Ãß¿¾'Qú‡ãÃó9¼Hh¤51¶ ‰Á Q”oª*@‘aQеBWáE‰½öä2ÆÐº•"à%?uèÀ¥ª!da˜»}p˜$@3h¬<®X$à`ð‡hWnÆG™„õìpÜŰ aX_ úN¨1[ÊôQSÀCØnŸÒWAát)ÑîÏO;Øü$ïŸônž7€?Žö >¥ôuÁó¯ð™Ñ?ï9¶ÃEzÜCÝ ¼ŽÒT~ ,U&€)5´*©Z"A‰ ;Wœ”‡×^žtë@¿04»ñ¯! >Z„è/sÿÔ6Xðg@uï?îXÑ=m)òž':q¹3ò;^“e<ˆc>Ìêð5Ô ¤A¨Ã†»ÀT>AAfÜ­  pE'‹JŒ˜CW±YÕæsúîêRÞÑhž80%K•ü`ö ¼›W@?›þ.wÊ*ºøÈÏÁë¬á?&¨ú\m-³¢Þ=.,ÑÅQaTö\ VÃ[0ej"W¸ØêŒì]ÔN€G£”ÞÊ¡\IF‹7ÛnS,8c¤Ë*˜¡( ö³³äbÎð*Š•e8YÉíß⿌»ˆ $#Í´ß\b,’@´ò«°q§€‘);¸ñÁ0HFÞÜ és¾Ö°*€¾ÂYŠÆ®”;ÂEðz†À° é®ÂŽpßZõ;!_·œ¥¼!®‡˜–ÏU3@AíB~Y=÷–ðÇ©øqùüñ5NÆé?n 0öËísuŸ|Ï•ÀŸ­¬?=懦AF€NÅ5`½£Æ•Ô”Œ *+›–œnì 8CpÃ\\¿;šÓ?_=}?ïÒ üÐß“®™ûsM¿KïÒƒ¿Ûâ­•³µ;/„7 â5—AÏ G³GY@Yn.$ˆG€x¿T"hNô™cµÀaäÁ!©@°ª¤xè^µèuÄC 0šKD$ÁÐÁ Ä ÃЗU,@ Bºˆ ÚyL¢Æ` c4Vl²6BAg¥‡}s)`_/á_Û7”$c³«¿ÆY×ÊPv‡t€éœÁà€¤PÕ€çšU$ª¡@‰ )D£úPŸ^x¿.«ra{¾·ÇžG ;Àæ4ªµ,žÕhˆè!<à «¯b‘·ï€!WÈ'šYßî~8€[X ò=öýç6ª¬…è‰;B„<ôª¢‘ ðÖ57’9æd#N×]QE€ì: ÏÏ×&§8(ާâ× Þ!·ÇÙkؾdÛ8¥ ƒ”Ò‹ˆõz¹iRŠÂ¦® 0ÉuéÉÊD¦méI­8oæÄÚÀe[ á®!ÅÆšA|   +ÇÓCèf7S釋3¤T§db‰œÌÆG‚œ+:[òjkl*gBŽÚ°èV„Jiª‡lÈÎ,QŸG†XŠÓkƒÉ‰¢…0*¤ÞˆJgÒ(‚º|•îD{‡¦„ÝXöÐ`£äKÆD1¥pš'‚õ a`,&´T=ë‰ýjoò.ÝŽÒ7˜ÏÛ³8ÞÚÓb°˜`*€þÚ´TºÁ0VpjÁJ‚’Š¥ôÞ5} s.À‡©A+ Ùx–Ñ#Ë4«ŽXP}T?6lgJ=ø«óþ¨ h[ã¬ó8ºé ëþ€KÆf>Ý"‹çÓ«Ï«ý{$»át¿¯;å~ Þ£Ãóãé{Qv?VòþîUXªïNèB{xD0ah‚nÊRžÞdE¢” KH€ ZàN±Æ]<ÔYƒ@Êv«®e¥]‰2F®Q{È~_¾IÀn¢4m L›W½°ˆ¿´ ž.F§†5RkJV®Š‡§…†tQ¸gj>»£ç’u žsW’þ=—K‡Ì`í6xÖä#2•dl è|—<<Å*‘pë*»C\Ë5 ˆ%-~¢Ì9Q£Ð¨üÉïàŽ†¶"SÀ»ÙõDåÎÉ#´ÈéëÛß*:^˜.Á<æzyx^½h(Þ“Ð¥‡ jª ¸ŽE3½WãDÖ–{5ö/™Ó¢ ¨.µcnºu®ãÉòz‰/ãç{^ 5Îàv¦Ï`€aæ@*!ct¿X‚DàÞºP¸üæïw£‡ë† Ń©^ÉÜ.mئ Ò?¤6Ï A X@¹X±m%xжD¨±‰”Ø1ŒcØmøßMà«hykbìòF¡©c;,A¤áNü‡°"Ljz¢'ÐH^´RcF)ì!M½7àЙèv®Î–• ÞÖˆôú∳*hˆê]†; #%aO|5•ÎÍ^d;×€ Ьᨃ+IU"×ZŠ ÅmÚÞ-w© ÙéÍ‚J vƒk"ÆÎ,ÌÂMŠŠ¦U2Æ`¢0Qù‰´wì$DK\:xYϧ´£_¥Àó’˜ ¶&piß[¼žÔèø?ºö2¥—É6}ÿn‹éBôt#³ýÔ$Ýq¯Šî¿s‘OÌ/˜ÿ¿ {*>|ûü'0Lê'ì%×¼/yâšðo´×ÏÏ4PM5ù"o`\âÃJhT&ÍŸž‰¦V€Mt>…¾’*!èE*ý!À@½Ba…'µZÑŽétœ@”hÕIIF$ddPOcŒÂ¸Æ÷…+<ˆÇ&t¡Ñh ÆLœª¤Á(là–qA­$®‡6 !hª¯ò-" š‘V0>PRèþ0ÂÄ`zDÈ­âˆðÈN†}ý>xÿH¦Ô;¢_6ï`‚½x­€ç;è¥uU³ÒFï‚®[oÕ%V¿¯+Ë´U Ɖ"M|¼ïË6û¥“ tœØ¥¢HQQÒÔ‹Ò@¡•>G°nBòø|¡T¹5zñž8mäÇ” ï4³×jbŠ%žî(Íä£WÕL@íÌÓ:,‡ØèÄ·Oк^‰¬½†´ÕÒ§™x©Yª!¨=FAÖž8¡æ²#òˆzóû{¾cÓ‚êºt3op 8€:Xù¡_ãÙàìU¥Äl@t87°ê"=~bÞJ$v ž$P~<‰´N;THë`›€øD—Z¡KÒ„ö?/kN‚MÈ5ñ^*)ZŽÄ( JÕ®;ÆAä‚”%“‘rQ$%^©¾t» ´úja”{À8ò[(h0Û, NóИ(Øó3;ó#À÷á…@È®'_"ŽQÀëô²‰@d“´0¤¥‰+£x@;jP;5ùÌ‘ø(H+jæqŸÉÅWËûï`$rVIª!c¸õÏÑOA~Ú=ŒÌ Ä~¨¿n|p8|¬tþ/Œ¤šµ²€VçÁë•nyÅÚŸßóȘNv†Í±êA~C¾{´,˜¾g—óyÝà?{õ~þñÑtŸ'{ë÷íw¦öî·÷Ͼ2 îÉ<ùo ¦;pÙƒk—¾$_t;•òë~&á6‚Ç73ÑÓŽY‰H Ï‹IBpF¶aD«’¢ÃÈÅÒdUò‚ ZI8Ž´”Bµ)¥í ÖdÇ¡$¡4ûkŨÌÙPɈuÓÒ=’:+ÑpÈ~ó Hüˆ+‚{™på“¢€bÐQÉ!;. 
Þ‚¤(w¢¼œºˆd¦ Æ¡…1çžU4L& ‚b•æ –)E‰3Òz”$êÁ㉚Hƒ´±ìå `X£¥yJ )EÇȆÖ„‚9D´rX &ª(5h!KAUHª@X©²o žÁ¢NÀGSËÂâDc$í”OÇ9&¯b©=L#µÇDh÷ B°ò:oòÀ-¥¤¯íËá£E{ ";Ž7tV CôWÔ>–@² 'Ga)›Óµ—\µb½:»³nŸ\]BÁt"ÉK)E9#1ךáõãˆBÔ¡ z^ž¡¬…±ÐܦyIÄô—Hn¥Lõò~·GDÓN¡¨"€¿AU+ Ñé •…Ep PE/x>$’B³€a.ÄôpUÌöÄIÚDßy„ìjV¥K+¨ec ’)P S#k×±Ih»`Ú¢eÔ¹Qµf»ƒÐX² À=ìà /çï•lAi4%Å9&ã^·¢@:õœ' |2vï´up:à¥)ìeõCü¾¸¢Â5Úò<øyÏßùñëŽÀ}käǾ/\`ì7°ßõó뚉íOOš6Íw;xGó•IùÍ}œqß÷ ð—×­¿žtÊpÂÔq|×õí@¿VÕæå>g7Ïànœ»[¡éCøÏ–œ^·™4Alvˆjz4 [:ލðzšØößEP9¢é!Mç£B}D vŸCÎÈ[ßÈÉœb]*¨·±L%ß 7‚ÂÐM]'Ã^áЊœ»z¯r;Ü;EgºTIuM-µ#±„DF@b²á¡ì³-tñ©¬||^%]a,M$CçpC8þ ª- Ðn|p î_½ƒê’ó{l@ GÇDÇ ’‹Añ›}*Ö'™X‘4t¡l—K+ÇîÓæœ¤,E*!F–lƧ°º.Ò~N8µ 3ªÛSà6Ê:Ж&HE(Ñumª½p”§PP‰T'9H FwZazžnñ§i[•qR-:2¥J¢ùÚPì=õ¼\hË®D{oSe×´™£Ñò£såß<!hL¼P Á8P Žòh|þKÜ¢Wß¡iÜcÚâ†x±mFn°] Ù¾rð572´±{ÖNT‘Mò0,¿b‡®<¢a¥hµ<ÇÞŠ¸ÐUQ*É.qÆÎʽkTPD øåðÈk^$xÛ+7€$I¢ºHnv¥—•eŠ?K~Ÿ$ô¬»Áºcgy6æ+’~÷ññI‚¥¯%e㵦þ+µè‡ÛÂG÷_0a`/|$Ò$ÇÚ =_áNRb—%NÃ膡¶…àš!p\Ùh Bu]žr_â%ÈÙ¾úÓ鹫Uh,úŒƒà~H?èUî¸lĪUÈ#<¢Ù‚ªÂ]›øx%ºÀÀWÌk{ž§ž#1”ZX ”Q:=œM@Èž"ã}~¨T r}ŽŸ¬-2E£„R9x͸c-”ZZ@i0 &¼ôøoRñYÆåhÒAS´5Ø/ žÑVÝ¿?·@´æBÚDTRåMô1›jmˆ…@Ú–ã ›<šë®¨ÄP†y§×®ó³ç‚âÈaPiâ¯AëŒCHMB…¶WòBÒ¨ +E:\Q8ùâ  ÊVg˜/ØÀ è|­§OÞFXÍÔ¡G¯ŽcuÝ›&¹ÑZ˜ð$étUkC=Ðîï#6¯dôÇ÷æ{"'ºKÛÁ¢’¬V@Ò5•¤ì ÷‡ Þ™`¹K)í¼9¿ŠHé€;7âó*= @3Z<j ’‚½7Êé^›ÆNi^IXÜ/&xVFÆ,¡q÷’•Yoj/¥¥I¬€˜Ó£B§a÷|pœj/öú¾³á¼räé­ôU ¹Ën‚hböÔCV wÇdQÄhíNdc^3Yi2h;»×ÙÚ¢l#Å8ˆÁè`¿Hg²ž=NI[Œ(¯†ÿ¤nY^F’X†+.`l¢íäJðíØóxƒˆpGŒA&¨ ò™ /«H¡¨hht{v.P¶ŽŸSzË͸Åù¢S(«EQSŠ! Hi jž¸û‰Ia(„²Ž98è‚\Zi€<«†,é(+%l‡ ,»¶b c2'·Hk,’•qbÚ’zP³X`ÁZ‚Ž by8’5b Ðô¹hpùØ‹ø  eé yꦠ-"/zNÊtÄfÈtvзc;{\t|¿7¢lbU$z^<^ "Êp§ /võËŠ+T uö‡.9Yr*ˆƒÒ†oTŠ,M;”ÙÝ(–;™yšŠâ€e¼*‡r”€üô_ž¸UÅË,ÑP±|ÃŒ¨Gq¨uŠÖÓ›¨Ú€õàòùåU²¤¶ØˆKг;ò9P¯´ÌLt ›â“‡B½$DñÂqxÉNÑóP:Mu& ¬à@{ç[ÀáŽ%ßH0‘*DbÐD5‰TEñ¼f¡€•|I”Š©¡pLÐJÁÿ§ê )¬j#21ƒ kcÐaP­øx)]i«¡u pL®,ЋîÎ: ‰œTBéâyoY5¸Êhô(?eãÓa Š©Šõã‰ÒMʯ„{ŽÐ¯)§À­«€{Så8ÄLú ztCÆq@Mµ'ä}ñIÀÖ`ŠÅ‚.Á]q醆h§Òl_QâNU`‰íXÔu‡Ìä+¶¨øB•Á¹J˜gKÔÄÌãÝq lò1Þû¿›Àý,_]{S¯_|^™ôS¿)î|ÞXæmˆ|ïôADD¢8ˆâ'g)ˆ!ÔÂECÐfOÎ]<êžj v€zê÷©Æ(dzš)Ñîû>•"®¢@nÚ9=Á^¥šO\VP+:–=%+ÙxÈ2î$& ˜â>Ñprî\€0BÇ…¢ a ³ÆƒáñÍ)«­Þž\Ìq/ PÆIaVES²“”ÐáåGCÔW|ãЀP& #Aq:æ@šxZ} !Åß%‚Ä|$mƒÄÐ7B©")I<£WD#Æ·à=ç3Áu¡w ;kits\Ï6“ÒrðH]¥‚÷MèÖI×cWTöHx&_Ï*’¬« ªT2¢³š¨‹º­Ä±yx©€0`îÆ)M·¤êx:’¡Áèø ˆ +JÅ7p$¶GAñ%Ñ÷ÀÉP6R3²c˜«fIAÐ§ÛÆ{QÏ|ñƒÝqùìâñT>)„‘…ۮ·œÏ°uq3{ÔâWârÿ>?€SM|Tœ* ì…ñPõïöâa²Òå˜UÅèHW¡GN™m’yV.…«ß(ƒ‹¯—îw@³,Uà†û M)¤—è¢(Á¡ÐIKh²3…{Iè ½%´ðÞ…2µíÀ{{ûkÊàv"³ËPŒöùé9¬€Q#ªbLuÏxh(Â(£qÒ"‡³Ö£OÎpÞÙk;ˆ§ ò”ä+¤üYÔCÔÈÊX€UÂÙ¾Ÿ‚ì±5™UDÐ<³\F!`6iš*ny&ª7ú’#i:ðIÉïòiþ uûæ»ÑYþ@/â\q_^ø=V'iì%_ˆò»2ˆÏÇß>oÙþßé©çX‘jTŪö÷Z÷4ò÷ÆáëTÏñŽTUÑ*x žRD"^€*fŒH9N–ô³Ý„ö&!Ý{ú9Œ!J ¹< ègöÊn!€Áhk8¨ê¢b…ëb+`W¾$ÃTa½¡#ÏÁ`àˆTºKJI>‡žoºÁýHuéœp#‚î¥;K…ó¼XÔZpJCÚÝ{‚>Šž^ï P/“ž¢e¨RÅ(T¨dàŠƒÙÃù¶˜' ¤2…]¨êu× ª À7Á^³ÕŒ©3`nªn¸˜m’ ç43Ò:Z HBƒá%|S:ÐôhÅw=àÌ“3†JzjÛ|2fVûg¿)«q"øÇ:ùƒRβüäë×0ˆ1 ÷¥_ÿÂàêßt"Ý€_ݽZMŠÕñN¦'±fǶ¹:èšáSrs‡í1¤$G´E¢ÖB TìJláÙMr–5P½îÑíà„<@Æû]Äj§µ½¸Í ); 2ðeA=¥À\|)‘¾Î$le xäo›+š0œmF‰ò4Ó‰å[Ú›kIg=Ž¥d]@®x;óã8/µÜ»Dùö|ðð±íÌó€EëÏÃÀõ€8+Rˆ ´/Ž|(ÆUh±Ñ½3³’/s¡ ñP鑬ãµt B‰½pBy‰@ùâL±Ê­x"©óU²E »J§]ù{âµÁ–ô.êÐ{‡1<ŒÐ¥¾?ŸÏúêÄ.ˆÌeyõÎ/ï°çVõœ‹ëEŒÒvfw4lÅÝij±Fr‘˜†ßñ(÷g €íÅm­ºyš„UÂ?{:b)ØØM Å…LkMà‹½‹)Õ ¦³–Ù}—GÈX铃ƒE÷°7h ±“®E.ðaNéAà\Á‘,(G‚:@"° JàØsb È$ÏÕ³˜ôHœˆ15XÚÑm]x{/^”í¾ UX- ¿¬„EÔoXfäf|³x™¯0P§Ç´¹záº!'O€ók/…qôØ€¥âúÑí]‚”ÁH–¤é†ºnp©­­éq;øwù5¿F™eEƒ•èß¾'Qˆè@áÅ‹™Ò:—¢QP 7‚SJVBÿ!]×9€YBèІ  ;S9Z…•RŒù>l°O<¤ÈŸa‚šé'™-"Ûˆn‚WÂ\ȤÓYË^à'ÜAˆgJ@qÓRI´ ‘¹Íà*ÀF§â*¾ †BlÕa¦ü“ÚrT)†÷J40-í \² ¡"*Ï+NPÁsº‘~jâšÂðñÒfµÄ»©Û ¢ÚE¼ö@\:­Î‘®”!«+àâÇÐnѶÕILÀ‰`$šÌ%jõãŽ9Æ.ßH¯îøå4¤AN»ÇlzÜJ<$KÛ\D÷Ê»†½~Ê<ï‚ø4ïÿ}sÏ\ú»ýõñ_îNáìôÓìÄâE„«>±mz“Ÿ7ìÿnAlÙ×ýÿ¤éz´¡ç“£9æúõ›äŠù§Èäÿ‘ý¹5€€Ÿ(OÍ{óÃÃæ ·2—«…žuã"¡V«ðÆ›à;žC´+H&dê´ñ¨E.Q‹ò±{UœÆ1)¯hÓ¤/ˆ†¨öêF<#ì7Ц]dºi®¶¤:¹Êœ`˜à‹OSfüŽ(Tè9q"ž]o(U”j³ø"ÿÈ@Ňö 0G…£$ˆ–*U*9'€“Ð(YŽU•òi}Aà’²,×Rõ3€RT@zFãë]æV†of;ßd3®3<ºßX3ÙéÓs¡b"*J0]V«¥Ò7àë³qÇåÚNÙ;ÝæÉBë$ ¢Û”%Ix+C…JøC8‚ 
­8àŒÀFSH'èk±Ó¯^›äÎã!‘ÍXìk<æF¼d>¡bèTÕ{ÖF•æþ¢K\¿”ôq\j*‘:V<õ²â™¢TIà²í®¸mHÐg˜½¼,Ò&Tqz>ÊJP±(*úRokÎýð«pB8 6yZ}†‡~Éž ÅYŠ…÷ j,N¡X†­t©šÚtøæ¢ö#Óα×]ýð¾o!>—“„p@ÒN¿¹ç”!RïáíÏþ~Lª¾ëñû_<*écôm˜5ò¯MÕößô:Š>f~¼£ýU˜ú( ^„9ð§ÈÉ#}ð„E×iù)õ§Ï)¿ò:NŸü{Dóàøþ94u±:e3³¬ß¸à¡AÈ !ŽÃ¸Ú!r€ÐpònƬDGÏ”9׎P„1ùˆ/¸±Uì-Pïgp†lïÁôÐqí'Ïž6üT´r¨ñ&nqJˆ`ÛC0`´ã`ÍAQb ½”Í:à+'¸ø@``q¬aÐ’‡)±#28ô!LhÑdý9Cï B|MòÇåÊâ–ˆ’×­>˜ñ–D ª…è¢ ·€E+;쬧V絓‘v)DÌf·x‹…0[â!ãºVÚ…“$}ï'(Ýxm¤:Õ»d]EJ¬G¶o „Œ¤–§kæ¡ã’ãi¨‘uÈÉ(ÆhÚ¥¸”¡áDzöòã­v€5€Ë•Ìa—¡€.¥ÐžE(QHJá/®èPƒZ÷ŽLV”D*áDYú˜Ilu5¤ñ{ñìt»Ðœ@0ùëÈ…Þ5‚Œw/~’ ¨Ýš3£ß½½Í ᾘu’Æä/XfÇ7¾dZÀ…ó>ˆnyw@*Vƒ:í­´éëx‘'ª‚±?øôó\ûî» †ÓÞ¼ [·AÓànña™Õ†   ô¯³y1‰º{®Yáå/Ç•~þ>Á|x~ýÓ[Óú¸R§Ïº­eì»‡Ž €€Vdy¨_w8†'p`4; HÔ•rp@ª€ÑTö±·ÇZÕë$DK²9„ºZ½†®>˃Ì ÐÁ—øÏc¼Œ‚ £l* :Þ •’3ªj]=¹&šØD¨Ëbåxà†Š¤QDQTHQéˆó5*;žXI‹ë‚MCDhcߤçfP¢±X©ÙŒÀãCŠYvw¸^­"……T ÝîÜ<œŸ$Q £Pêù ÂC ¨*ª˜k§mφ¤b£¬r ½õÃi”Ž‘D‡KÒ Õ(`PqO¢é×ýÑÕ_¸X=¨ÍáÉY‰!<@Û¢Îûà%ôQhv=h"áñ»t…™-úO®*V¨è@‘Ÿ ô¼·W •À ÐI÷NKêÏ›ÿ\îšu?eý'|eZØÂ2îmOÏ(ÀlÏ;”êmzá&Ê%=Ã>á<^ø@E Vç| Ç[º•È»º¬ ƒ{è¯p*>ѯjºœÀu¨èòËáçÎwß*‘!ê­³IÚypP!ìeh*³¾ÃƒÌ&ê·Ç·„ŽürÍU‚§UG3g™ì µÛ:<Ó>9M”Mzèv}ÎÎ@¬4`8P` O)¼öÑ¥‰ªUç¤Ø}¨†/E±O­‹çK!³N‘^Î…nÀéø­ äèù8òþÄC%‡§Ízõa6uë}ÒtÖw”ë"k°Y* NúR@*é§M¿ 9Rdö,ˆøÖP<Ý‘A]ihd¯HÀŒ¡¿~úäëÔ*žƒ¥`üòœ«Žî0sȾs®0ÐNêZšw>µÛ9jQbÉï¡üg’lâ~HP‰Æ) ¿[ÞœK‘ü“<¼ÁL¬kÀ%ò-§eä¦*½Wõž3‡P¨òϦöüðª°ÀMA%­r%눆˜W,š‹ð;ûq6‰DmQ=D Êà,©µ†>õó®Â†¦±,÷—ÁϰԳõúø÷>¿‰~ ýîFm_êj^½<òñn« Xx‰Â xyïB`@ ;â"€ÿã¡BwPüõ÷¼`â@ØžÆß˜fN%ÜßÞãdîo¯9pÏŸC¯üèa¸ó|Z÷>ûï£uðŸß祟†ü}suEí®¼ËšRõ%üÏýã\„¤z¤§¡;Öã‹»À˜[ (劉‹æ×~…WÄ®zÒ6‰!@ÇÊWНóÁ{Oo4éQØ[WôâcÖ€¦ãSéN’õÀ  uˆ«Ý›^Žù\P]«©Ž!øwÄ10TSÅòÏŸ|'ÔJñ¯Pv—µòx0ƨÌð´_¤ijÁaI95ÒsmPn—öÜR]åCÏuÅH×´ù÷É_ϬƉw:áÐ “¶1]€†É“ÒhJCºjÕæ=' aˆºÃ»/wT¨ªª¡j½›–ñ‰|Äý¿ÕçAãgô»óËéÓ¥Ê|zÜûãqòhø²ýòoÃÇû/ñ÷›’/Ã1ò°ùùýOèñ‡ÃϯžºZF>ç~O|åñúg¬ÚE3*ãõoÆ—Ï«_ã®þq­ÏׇÜ*¨GâuŸ<ÎQ4!¢Pg D^|´ÎÍlžWG¸Oã±0CP o»ºð»*åQ²½-èÄÎ<&^ˆŒÈÇ]{ñÇ?Ô ¾ê$ùë…÷u'C¸~MâähjDF!•NB #&’Ú¦1ŸüèHÉ(à2ù'®"”­‰êÑðkÄ+òÏ2áge¹ß=’éÇÙhÿ·Ž=YÝ_³µ÷uÆÖL1[íü<{çæ3ëî³g+­ö‹]ês+zù[Öˆýx¯ŒC…쀮Q鄉D›N¹9b¦_W`Ô–u©c—¨€´Œì26y@ÉCG¦äm_,A å$u6oÉÓå0‰‰úÕ¼­I´UéÂO®žaW€Âýw„iá=ûõá¥Ì<¿Q=?•·}¾ðÏlÿ¾ È÷lÙççÂíd–/¹,íëõÿNh9q‡Û>ýúï×êÞŽ]«©À£É’ž‚¼ %­B»deq8Ñ®ëB ÿî¥D˜YàH&.=詆tá@`⸇ï/]]õーÌO—©qýÜ«Ay²Üé÷ÎÃÿ¯?¯>þ•êì§¿Ý©#Ö›ï~vó9V÷EûüÎùØà½|wÏ›J|¦ïÉïúúÎwßç?ÐüÎOÎßö™óÍ×_÷\þÍÎջ߱þ>|Ñb{©úp—/Ëõ>'åþyæé¾zê¾ì_u~feý‚ƒ—]ý¿ÇŠþ&¾„óéñÇ‹Gº@ÿyÉÔµGدçúpÐ9Ã÷“ÑóÇêSH¢ûÿ`}=íÞsç=Qý×ãŸÜ#¢üïÔ0|{/Çì[È•–Æž¾…ûüüò¦§å»Ç~zëŸP_º?vxä!à“_”N¾ýóÈ­|’¿`s1çÇj~:… qaË7ÒÅžÉ!ÝŒM@U_Dˆø Õêg €],–ä^ÃNK9™ëJI5š; BoÓ'VÆ%¯ `öTèDLÑ’ªõ[|W<ðOL yèc1Œé¡Ã‰: ºë7Á3Ï)~VY½aøï=]Ô“_Eü¹ñeçØš ï±õðçZ·ô'é>9Ðòû¿êgíÄh9‰gÚO¾|âþQý»ä9SÝûÃü¼qhj>ñ~~N $ô¯ìâñ”½_ˆÇÿg'=å0UöŒn_Hò/ƒû?Ïô™{|ïãëŸçbhï_iØò÷ÓþO¡ýtï¶CìàîÿLåg¶SþÄî{òó¹­}ò:ßïÇöŠ›{~ÿƒŸP¾¿–úçm§q×áÝ÷—ñÃûdø¾áý;^?øX?¢Ÿ½ý8¥_×õ˜óôüg?Î|_Ù¿[νÀïÐisÕ÷x*{$ E‡sEÙHgÈŸÛ˼´fvÈïÞŸÓ…G,`/ 5g‰ÿ{Ò™õ}ã<ü¿&bÁ/qÇ'¨«VÐh"ç‘)\âm a°N£àl3 ƒýª_t% TO”‚)ÇfÍ–•ÑÀðá–dÅ×VÒà-zªiXEG¼Œ0”­¨QªD-hJ¼h »Ò 6Ò'Ozue-ù4¾ylÓRÌ {庩Ú|4:˜´NÝóA" ™‡¤YoaÄžJý¿ß|x€ûQe£ßŸçxÌQpúìd-ΞZr o«û|xQ¡ˆ[áÝ=yöÇ‘¦)½7õ[7¬OÖñr¹"De6¤}6U]ø3øôŽ_;s‹uj¥‹ªö+҇͢ÕGþ–êíÂ2š;¾žý™Öp}¥ë;¿•¶í»Ï„–Õ¾U{½gWC§:N™±ï»Á‚=\蘒ø ï±6àØ®¸c _Ãþ܇Aø?·?ðíÏ€ýÿ„;ûCÿ·ÿÄ+!1AQaq‘¡±ÁPáð0@Ñ ñ`ÿÚ?ÿ¬È•Fƒj •œªù"Dºk­ˆêÞ‘.1nÊ›)N1J­·ø‡ ¢NÓSdã88¹è #í¿? 
ðéÐÒê.ЉD SŠ‘`¨(Kš&3Åí@m‹NÎ Á_?‹Ìó#Šže¿ÜPêãH †q™ãÚJ5Y"âቧ)˜‰ÓìÁ‘½=%Îæ] °ˆ…\%ú$¬Yl‘¬pH {`ÌDZ'0o=~þô§×/}€¶Cm  K¿U?Q'ŽWZ"ú rKZ³Ìøâë¶þcÚ&aB%„ʼnÏ5´XÀÊW Ʀ¢›#( ª.úáÂ<ÂCá'öê#’*dh:)Ÿ©œ_g°Ð€XUN—`)ñAŸ¢1N3\éP(z 罡\êˆX¹s‘¸Ï(!íL ŒâäœÀ¨·Ò¾¸+%6ß^¯z¹x\3Ä<€‡Ð]Ær‡.íõvÕ:qÄbÂ+δ5C#Ýxj–C‘éË!¼¬òÓžN ì„âfÍ76k‹‡@0' Ó̿ۯ@j߃ã³áêj%Œ[Um Óh¦E7Ìt̾\èR+ »R ¥ Ï(ܳ5-‹Rr e¼´as3à O߇!øgO<7Þ~‚Þ¯)º0ø×û;ÎÅ(Ø#‘oƒb±¥×9ƒ28Dvb\å@^- E•6œ|©ÙŽD½³‚Æ]‰ ˆ&8ŸAêF9FŒÿn¿‚”#2_äìvŠI_R§'ü#ŸÐý£€CBçA”IŸmŽŒªD< ¶88@“n¡oNŽx¬"Wy€ÊxÄd‘"˜n@Ò;´˜ç¹®sæÌ*1$åGh ~`Ÿ4®⤎äå›\s "ôä 13 =iÙ:KØFvŸixˆ ¼tÜ”˜ º4ñ=A#üdKí7{å 9= !W£ŽWaÍ<û}qó Vá§âœ}¢äÈ1n5…]xÁWÌ÷œA°Ìù—†:’’Ý1®PlN0õ)F])²ëût˜4d¡›‘_#O±st1+‘+R½,2—ú ¿+&çN2dëçØx^ ~,Té\‚U,.ª˜?gνüp7ûS|™úgÇŽ_½!ùpæ…ÃHyÄùíü V÷4ïhåèþRPêXj¾QO‚pÀBñ“p%ð_GÊsy;Æì /&·åmÉ”Á(C+ž eõU*í ÉSÚm6y,ÏÆ›át°â]‰+w°ê“ÓÍŽ زbA¬Ô×(ÃÞ+ç.Fí'3Qc´íîWõ€`E)õ-|iaÁªÏG³Õnø=Æàt9äà‰s?·oòyœÉ+žŒ¡ŸG‡þ×Ný¥äÎúÒ t`–Nüf:vva&Gä'Çe×Ýcä¦÷qê,h¬®#0ôížbEËY^–´_Ï( ”)ÖgÊ6s hJ«Á`îU‘Ç1ëf”0 ,òW‚zdñãìOíæ. ƒc2 ˆQs,±ÈÅ,ö…Ƭ‚¬ ˆFÌ®w´2òd†ÛªD= c×rÓy^¤â}Þ$—ðì‚ýü¸µF­ý@}p ujὋÀ9bO<î)Y¹vø}Îqk¦Dó¢_¤ºÇkc©)”qöK¼§ÚðD*ªÕë–U‘›ŽÓªÇcóý¹ËOˆÒަª†ã€õb4žéMf³Œ«P4Žø0ÏÂPK¾)öTòðHYêޢˡuñð.|.|q燖DnG‘ùg3£I±²˜J~’Ì$…|úXk„’Ä©äéñÆŠ­“Å…¾©ç“À DéYGÂc´·€5lÎ0 :•r^Õ¢+=…þ2ëÇöáP…'¦B_ŸÙ8̃T2‚ÌÂg5@ˆ Ê”T쓯(Æ~œwÆ ÁÖÊlg7µÖ[Î!ùàI̵ôkô_¼þxÉOY>Ÿ¤ï›0« íÄBÀ¤VRÄ9Ë|Dbecçòo´ßÁ2qÐ=ì¢d ÐÄéuÂãy Kæë«ÿ¿Û“ü)à W tÒ<—ŸUWå0pý+¨FÀ±"×@Wõ.œžûøzÛò;ÑaVõ¶aÐ{‰Æ6!5Üß[âå6ÖC¬%ü~ÜK6ï·áw®FòCãê€>ÉxQ³§Ø£9¼qØó:~åãÚšÏ2l€ð¢šð½w)‘ þI¹W ÿ‚–Ío $1h®ªX`ybìÍHeÍ13!ä)c˜FVZŽ'ÈCp|×ÏÁ„!w »òHîs\hƒåæÀÛ'¾Vs J¾ƒìC•nr~@ý‡Ï†¸ø2€ÞkásËÔ4£í%Å“©ÍIYÿK‘ ŠhÀµŒMÓ­P*d¡å@B,À%µÉSûzK"žÌøóŽêT?4¥Ià Á Ì°Ñ‘Ò¢“É¡·  LN<†·CÔ¥ó‚c”H 5½, »SÛÉÅ8Žå†9ø‹ß#ªÙ9Að†ƒ xoˆ2Ÿí1É(£rvÒ¾yŸÿ8è×}IÜß3ûrñ 6 ]Á³_”ÖÄ&b:eÜŠF©nkø¿ÛÒΓáç£Y\û£1Ät¬¦ É€â‚(ùTЋl VõOîþ0¿'‰4ì`í‡v<2ópžš—èG3ízO1EÖMøç†*%𿮉SS䘣†Técà‘\ìëÑÆo7t¢,g^òE‰S)ÔhY³p-v̰ǤÙ!Füßö÷”¥:(²›ï ï>ž23c šØm)«4jy=Çî:gËn ðy¾soNdÃ|ÿÜÖ‡á¼X=ÿ[ù8.œ÷¿‰‰Ãë»›7çô¼ŽË¥+Ån­=:δ>ƵôrRAíóÁQüýp‰´`FLàCãï¾zÝÅþDÿÃyæìÛ¢Ÿ™œõùc ³"²Ü`Ê"žN@ ÑÚÌ+óxq‚‚¨èHÊÔ (Þx%ÓŽ”Q>kÛý¸žà£[2ž|øáÛòb U@¼¸€‡m õÕˆƒ‡ÑØ*ž0OÏá3À:êSéP?>ïOýÇæ`ò÷‚'ìI£!}òÀŽÁÓ­¦fÎe¦†¾ÿy1m:3¿OÑ+Í’a s lzTÏ;ˇz1ÎBƒ)ÃÐL«£Båç<) @íW;>ž·øòëKÑõóÿ¼D õ d+’Ó8R… „â½ +}Iæp¶eíÖ” >nºâ¸šÍ~E9ƒ/¡ú€Oßåh¨ãÏ\VYKIŽA èüq_®8ü´ø¨Ÿ|{#6=`(lU°”Ù‰R3ÏÃér‹£vÇNLM⦰5¨ç.÷{ý^jÌ&ïãûxÍf^úÅÕ÷­Ç’V(¸úDæ–`ô_°Ç~/ör¾ìk•ÅËß Ñêÿ7ϳ=øÿoá¾tÓ´‡ÕÏ)мžÙìuüòd]U;û?^¬Â0ÞŒ6D¶#2…9}$L%`@ð'+hA£€KpÎÓÿÝòϤ‘ï¿öoLÕ¤sW¯=qˆBÒ Dnäj[ ëzUá¥Ö)2f–ï:Lo­H+­’¡2Ñ/|¬ÄnwéëzâX  ƒÀÖðŸß€gí îc6W»Ì¬uÙ½voÜñÿ[‰ QÐ ^} Ó.Á4.@¸@j,A“±Çî…AÂbkã×¾ƒËïÀþ?·ÀÚ¹ †]n!Þø!Láb¨d†XâÀ"—æ úŒpª`V­ª~ I”wŠ|QúìDã6âXòNcòqZ FºöÍn¸á^c½L/ryâÕ©y@ÃJ8Äß·iå+ã¢ð¤f¼6*Ã">¼m:“3ìÃ9U !M™S +r0ÛŒ_ºÈÆŸÛàF²V°ýã[qÀ E#zŒãé’1*‡â—í•ëŠ ˆÞ •cÿN_B½9#,µ5oŽ¿EŒµ“ƒî3ášöâÑû!3ïZd/y€º’2µD«B°â¤vMA 4¤XvØVBàªh4û„Ù#¶ ¾pXœ4Õ˜&]Éz–'†ãûu™—ˆD¨ ¢°—2í¨9ÄÁTAlâ‹ÈЦ^*HE¨Æ„Òä']ÈdžÒ@eÎßA«B(ªÜÑ’ÒO1*¸Â«ÎEêÜÝÑØôt|‚EÉô"Í2ìºlè1Uº@gZô3x¬Æ^jpœv—”„¥Ç0üʾQ­`$A½2gS;µ~q·œŽŠÿž&¹q˥´ß–F`ÄA³•´ƒÜ]Áa›e¸ €ŠÐÙç{lQ,oÕ×I¢Xxð ‘ï–£,ÅÓa’à¡< ‚ÂÅJ&„ˆpéNL(Àz:q~E@$›P3îë]í;¿£7ó÷ý½‰d~tã£Ìå ™iÖµà¾9î®2{H¦1CŽÙæÏ[…J7Äc“Q¿2ë8Î?ö¼PÛ p ÄÙžmX¥£¥~Šûázƒ‹ Ÿ+¯‹ïj‚Lê„„CËŒpOdPzEÏ|Úµ`È`±2äL/ Ä”¥u¹Ýï”"”wI‡ç×çû{ci…:\ @¬qÌe ’ØYަ¤ß:0ì}ºåYöËá@uÞ?¾èÅ·€ ]#{ö¿Udá¢l1àK圞èŸnxe÷ã:¾5÷É>W[× j³T©@Y@ôlÈ Þsu†ù–wxz*jESè_òá 2`L€é hrŸ\³¤Qø^¿µ°‰i/D_ªcí¸ôàÈ+"÷Ÿgrrœ 0öëv³®øFè½òœÈÇ qÂJ Ìúþ8S‡î'h+*æW¼Û ìl]±Æœ|éèV€#pOèÿ¿ëã‡`«¥¡>p¹ó®\1‚û?ÕáPOxx‘0Ä\9„è#Dò·MF9¡œjàlù;ζER1Lk~u¾dX{ïÆå\޳ŽøJjþ?¨õÊ0aLªÅbbÞtlؽˆÁ°¤c‡º1uв ãX(àLÁ ½^N€¢‰ËÙ.zÎ]8•$ÖIYˆË]N<|Å‚ö‚(äË(§’4Þ÷ìâYM’“Ò:døâ|Ž%Y¡ù%Äò] Ø4‚: Ùá†r’y¦%ƒ±À1T¦-Šàж\¥A :4ÔuyLênëÂ|gÂpÞ"“: Óù"Qíšk·Ðb¹PàÕ°‚ë:F Ê :`6laF*ÜóŽ`Vx!ª‹œªqZ¢ÙØ];²ñìp.œp pe0Òìí¬Π…À&¬Òg‡Ee{±®Ð` $j쾬Ìu3Ì·ñô¶7Å:­z]_#™tƒ„EEX¥tr¼³Ò$bAð©6äKX1.(!XáǤf–½¦Hú¹â°åzrz¥c“H7ˆ)„#Âök­í€éF—Ysž]‡ , +Á3XÒ0…06A¼ž ,b B¾¬çkS)¨9ˆ Žax€RLUX•82]¨0º’Žì³*N&R5ÎPÍ…;671¨NÂõåHÑYœÄÏDt`ù&ß=ÐýyÊ 
*5JóqáIàI}2çù¬sÆHÕf¤Ý¢s§{bh!ƒÄ;¨à²ä…F‘ ÕóuÀTZe0ÄkLfµ“.‡¯ƒÛþ\s;¦Êø&sÂü…”¤5«iy:öÚéÞd)qÈ•dÜvŸf®úiâš°›uuÎâ"ß1Ä\ˈ¸Ì&œož>1ÞøW^PP±ÀöÑ…ä–7Ùijh8åb-ŒÑ"`+» ¯"ù /ßtº^%î*êãŽ(;)‚þR{âN¡"óõN q£“hvàx3‡ B'º%*G'%&4#$nY£ŽOk”ŒÆú j”δï€>R9}"ô{yGÅS†µ v;àLÛ(&ª‡¸¹\eDÑN›Ð4¦ùøaàÙNšxje3±FKJøqHÓÏ}¨ø"¹xvYØRó¿©tã þdʃ(²æ­ókÝÑ ñ#ÛÍêTUÒÛ'~jIEþ‚ºß)³*Ó‹ åQt+8 åÒ&|à~þøÒr¦˜Ðg´Ÿâž@‘ò‹;âÂÌ6"¨íYK:×ÛHoz`.ûÉžG-©—†²VQHª ñÙ€˜ý‚4@ŽOíÔ–yUéè)•Š¿Wè|ß ƒz"rc K/×hsšt¸Wè|zå šÚ£ñ<àYPFÒ YC{CÉ>§)c^‹*N ‰…${Φ4×YÎéCÛC¡1à»Fkó×ì³lÌÿ\ðÛíýÁÆ|<´ò$Îx½ãö¼Å€¡ôjV Þ]¶—Ëo‘ƒË×ì ;`±ñ[™âxÙ`k5 …apàkÑ‚›Uø—”™àªÎ…åÔ+ëcÐH° $Äœ½¢u&Ì™bùhHÆ` ÙzâÀ)é…+é·R¼{nJV 8`eË`k\RÁº¤;xj‰VpNœ ›“N͘^uhj ´Ug:Á¤Ó\ !]‰­+°`„"ÑÇûë‹7, ’6Ò–®”†QÀ̰œ"A"%ìLŸØ:.Oçñ̶c÷yúëæõýQg!Cù}¬‡rj•óªý³<†ßÝöãùVþŸ‹ÊÂýäX0]õï–ùB@¢y ç<2ÙÒ‹Ü©ðÒI¾]h_ Š7€AÂÆ¸‹™†¶_†j4ØÂ¸ÖÈÚSص•Ù2sê§гEÔ ©‡Dd¢åÁè(ÓŒn¾Yt3ó?‘å8Q]`ÈÙÆZìÝP{X˜ÞsŸ¦g”Q`5‡UÄȞʊɑƋerìXlS¡¡£ÁEôf¿@¼˜+X*gbÊÛHcQž—/œï÷Ï! ·ïâ_s™ÓÐ+éëxœ ¾%‚³:hÂ2?ïe»p|ÿŽ.Ìs;ö_åÞ0º×ô¬ôvÿ.˜q®C6ƒ¬<`œLS|":Šý"ï—_ÇZŸîùÐO»û¸\ÅÓ¨úãl–qr+ö¤7Å<¤%}°¥=ð+äá%5•@oõ× ±/˜JkÑŠ=s8ûØZ šd >1ÒX„Ø2:f/UãòWþöƒ8Â$¦i]°QL3ƦËt¬¦ œ+PÕ±P÷HÊ!å=9(™p|²¤ÆQ,òÒû…¶€ð§'TgŠJ$ N º¬å¼m(ªJ%´G9 T ýÊùñóßéŽYùŸÏŽøz~µ™¼g€iÍ>ÊžóÀˆå•`V/û‰÷}ÿ—‘†Ž¿Ï·¼`þŠtýöõï™ËòŒXîxÖïõ¥é±G €¸rï ×Îz¿ÒÑ}'Ü|b|øäiÛx¾¿iöñ:Šb‚LfuÈ{©F|¦vT÷ÅdÏҌ˥ ÏàË¥!ߊ`*¡xý½B È‚îÀ4€…Œ“ïÓ([›Ã<>3‰dH|²ßÆÁ˜?­×2BÖ,RH*¥â– _Åz讲ì|kÐR°KÁ[ã‚Z™(9KËGÃ4.E¼%„bó²­\JW5Ll—1¦ˆ Q ÒHr#ÈÏœøã`ˆš¡F‡ õ©Ô ¼dÈ7þÚhoô¿Á÷㟠mþ>ÿÏ`?­E;Qgèh^»ø8A™O’z|æs$!g_*­x³Úã‚À/ÿ>yœQ½µñÊé«¶ŽP¿9{ïôñË0¼ ÉMú_Ž ôJï(dG@eÁö x„Ï ¸/F6›²á¶U\âiqG(éNȸD:^"ÐZ:ìÅñU Ãªd¦Á­ #@ðñµ>•ÃD qÜ .‚dÕ€²ˆÐ"P“ÉŽEwL]Ã|ǰù3ʹããP3#næP€ cK]X–2iL9d ŒŠ4dÀîq€‹È «ŒJUØá"šèŽzñ4¶?Ç„è/¿ë÷8æÙîßÔýkˆ‚FMzÙûžnˆDˆ?¯§Ð5d è:h ˆŽ“_öB¡Ö>ÕÇ•àkõ_/¿÷_Ô˜ƒÎ3|~zâ"†Q4bùô{âINxø`ªv8ó—ö¯5l½G>6|ÿ³˜_.ŸüõÏwèÿçŸ#×ãóß,;P° iHèƒ8cððVirPúØB1aTar¡‚’êÀÐã¥a¨¨ˆZ¼•E††›6 9K¨=ùìXlåñNw„BÖ¼b”föá‘A6Åàü¸vàÀÀÜL"î¤.0xvâ“Kr ”æöCÂÈÝ µñKr½Ìs?" L’šÕÓ+„°0;˜Ç ˆÆ“3OÅþ?žW¦ŽwësfoX߯9ºzìúà`•MâŸÓŒøßÛäÿuÂf¡šQf×ûïˆÙè%ØPÎA4¥Óÿ`2­Wkþèÿ?ÕC¯oû¾Xyd‹¬ 4‰o%ÊiÔ’¯ÁUÇ€<ž˜ ®±¼°i´‹°ì<£ ”D7Ø~® *ò7åþzüpB<ÿ¯‡ìµ¾õëß[àÈ=7¬¹Áž6s‘ÚÝJí  ‘VJqvŠQA®u :ûc%•Æ´æ åò°C'n¸L³>dØ1DZN_ˆW`´˜2†À½Bk+à¡p/F:…ÐÕ4Rœ É".<Ã!#2:ëÄ¡@EN(–€òî}ŸLÿO"$ZÁf²ûÉ8׉ìŽèƒ‰ÑK8c )#X¦)ke`³D«&´÷ºžy$ÞHCøÍžy®bS=Õ»ãJC1/‘a.1¬ðCr£:ÒÏ%)àëªèßäâР“Âîq›ÕÀðrGé\Nø3eC—2XßZ×ý•˜«  ¯ÁËÒÓ"l"ªe`½ð͸¸z€@F·¶Ð€à7}Ww¿òÊHõÓæ~ÜîdΛÇXøçD˜Ö~Bœh~áÁì3 †‰‚1qOßÈlód2ÉÁFÞ³'Íõ÷Ígž‰ÂÁF¶cƒŸ¼âC²æTŠkéø…è㎩8°’v ¥ÌX;¤å¶Éx›¢¼ šÅ}/ ¶øUOAn(N…÷gÀÔ ±O(@1ð,ãn7¾•ê‘‹N;¤{ìe“ùc™H¤}O³FØ'ƒþ–\T,ÐÀy ¡¨ëÐF|jŠÑÇ–º#Óyuކñ&X^Sé‚!P>K²éÏFãJH Ø•ð"Yqšá”˜¨»49¼¯±EÌl`ï”:’Ť:<ú+è®{jôɃŽáÜ#IMÖ£Ìr:6ÔpË陇®+CDgy ¾+t_ÙôÓëþË Û¥ ã9Û×]Ki’øŸ“KOX©}Ç}rZ>@(—k:œ¿Ñùä°yŒç«NgnBS=ž#‡¾V™‘]Jöœ*”;Fì™ð³¡_’öiEðcZ 53¡ÎyCËŽ"j‹± <Åøh6k> B¬H1Í—f#1f4Š*ºŸ½Ä!œˆô ðA³ªj~<@Îy# »Òcb¼Âa&•bpf!U¼)’g‚YÈ •}ÒEL °sZÑ,^5+ Ï*ÆUIး׃/È}%é 3—ÇI ç?ØSäcÊ#±ßl.×2Ï„!˜ZÚNb¬RF•—6’€ ²7pÎ##‡‹ ¢Ö,xÀ7šl˜¯ÎÀKÏó¤2ÜtWŽ1…B‰·¹¶;(g–”j‹J1!€NÙ3\qÅMÈ8»ÓZg¢œ“Kˆªi;H™8@JtpPPBd€ƒAFˆ"ißý{܉±†“n@ò¨“.îÌ­ BºáTѬ°´²±å€Ä&‚@Y[a…¸x¹«”ÜUúo¼yàCEéL„ŠÀy„î}4âPÆ•Æ ´’è‹¤Ρ”?x·^²6œ¤Æ¶&-Â=ø@œfi1 «uÁ¶A°ú]€#ª`ô‚ÖD4ý„/æ—ß¡Ìñ†x ÍÎi‡LòîMog@~ è‡À5á?ˆ>Ã?1Äf¸Có h °¸ 2@íÜÂå‰XÁgA §K©´u¼qÚÆf‹ÜÈ™w•ý‹XÔ%'{1ÆVfºqT$ø²®]Ê+ lý+9$ÖB„2ý) Ù¥òá;Dáaž|Cæ|;¢˜;¸pxñè/L'CjJÈ@0§.ãé+}°ÊM¹g±=UbYà`¼a¡£“àžËEC­3ĺÓ\—ŽVÔÑrR94ä#u\ÐÎõ…tuÆlxQrªþr1Œ&™ÿV$K¹äùÎ||èÁ°dBçw |ÃæÕ+xÄkÜiÑŒ£OÌi7Ë_+Ù™:ß³ŒK’o‡ð Ãs—v Šæ DÍà‚Ç'”H¨LQ*„Á…â9ÃʆX¤7#À±X8  23e§8¡â ‘ïÏöaB+Àh@˜\–¢ôR›«Œð9òÓ†GËödpЀpCYiÞ_qË -I‡ ~œé~ŠgâèAÛ´|Á3²„õx’Ç"ïâ{â?CÂ÷˜hS rØ %qél_)$C(•Ú´€è<à™›?. 
›2‹º(¦ €A~Œl¸ÄÙŒTΡ êt؇D‘¡6Øô2ð1¡½žýÖóEAÖ‚ }“×İ4ø},äï%3eškìŽÇ~»jÂôâ–öµBŠGÓ:rÚm$g Ï\¯dœT«UФi§Å›d±<~ciÏpO•5›j0LåÑèÌGÉ27ç#Í#‘³È¥û—ïþ›øy^Ü7ÆûAíA?‘ë'¡á’H–¨p²ùŒ²‰þÀ¼:OGÉ*ްîs üÃ(Üt?)îiš¼ |®¶Î>sì¬ÇѹdGÅØò“pYjãÐ6´B‚¥tiI:t~ÚB¼R`k-!Qxñ‚Pi[å­Ä¤¡•U •@">ì  Ú‡lדv‡9 ‰ÓðÓEÂÙ$Ò-+HUž&Æâòh™íä±¾ÂE2*Z"¼v´ÉÚuö}žNô†Òò8xÒätÎ4ò£‡L(÷„gC¿s‡AÈØo`Vd˜å*8ŸG±`•XsÕDFÌ|ë5y•ïËIõ^¡ð<‘=ŠÚ õ¹(ÒR8˜ %(v”Xw´àuÁ}Ê«ÿeF ƒæb÷P1þ\yNއõ,Ÿ$ï»"ÝYÒöI=Ÿ¿ä†2¸÷@ì¹ïƒ-ãUé$M1¬+âÈpè3Á@… ½BÒpçGr *Rò@CÖ­e ¾^0‚ÏŸ„~ÁÿOôOô·dŸ&]’»˜Šãƒ ¢ 5×xëÖ8.MâáÃçÏŽ"‚ž=œzôá€bíSáN }p˺I†=r–ØdŠŸ4´õàŽY@®ÆX‹ÄºÎ@G®É‡²ù±«}ôe§ÔîåõîË¡oørÁÆËÐÈÚ©ßJ½&^=R=–]:ðw ­¸%—Y•xû0ʬ'pÍÑ㩞¨¡€)qf̺&‰j–‘Wà"4Lµó9k­ÿÊ œy¹¸|!_¨çN(””&Âjô)”8¢ç‹0½¡j`\Ô¦­¸ÞÓH{ÊðfZ`{"ê8â"wB!—·ãÇ—‹%YÒ  üƒhIo,À ‡ ÜG. @hb©¹Ye°ŒÖP–Œ‡Ã2ãsâ*7<|zO¾5å¹ÿ^%ßáŽ(o1Õí_ž!2ÂâÍôéú}{äTnšhêô•ÆùbˆŽ¼6(æ,¯•ŒÐF ˜… °ã˜vÑ5TÖd"… a% ìp…%¡@É”À‰r 8³°ÿ¦ã÷ÏU‡ÛûpšéÔ„ÂÃîNTxÇæÇñ—ß'f€†Žègóxm!L…¢eHµ™òž_NE tËmàvÄe ¨Ülà-²V;Êytê†æék.,t3ˆÇ:êü9èàÅ:XÌ”1í§IxX«DœãHLœÈKøp~sàƒÝºµ«0 ”⣡â|$•v=K³Ìx tŽW8CÀˆ²Q9dž¡ìŽKÁ)Ç*x䡌tàÇPÙ¡Ù;°"p6p £GœÒŠŠð)¨2°ÏÅÎ'›üwÁG(A£ÂRîµÆs–¦ù¬q7–=âá‡4«“8ÊT<¦D͉ö!¤pNÐúì ÁVe•òO dßÑÌï”ü$Ý,oàyXð(ºL!E$ìY8 ÉS4±H¢J^!¶=4Î}c}~œûÿO›Ï_]×¾ÌÝ~œœëòý>/I?"?…ÏûyikDQŒ6h¶0laÌ;RX—V"ƒ¶ûDžUsû™àA@ ÄSiN![…l<êŸZýxhÂ_úOËû1§WößÚEä%“ºeèà „ãĽ 3œ<Ê%ÅU%Épl¨jq!ÓE°¥º)éŠH…`ï% vÇÀ–Ä–]0ý(Ú6 ê„qõ¥ASÒü8uºd`Ù·âŽ9Û8Èþ#—‹šÅ&'#¨dLB‘Ìå­„[Y£ž܈(„.ÅœþjÞ‚ ð8«Å4XÆÊmÇ•:k*®† FÁÈçËÀÇ'8rñ )©6‘ù|õÄ챤d ¸^$‡-YËž=+—º¦ebp„NÄ#±ÅçÅ'ŸJkÀ2Öx Õlié»ìÞ°£ X^ÄF(¨ZoDåHZâ í qœ8£Ÿ.ø¹oj`G[«‰#éÓ§$ Hf©Ø@¸áÊDì†LëÆ^R÷­R—³2Ùçöæ–3ëãÏ;¯ÜsúLOõä¶ã¶™ú²G¿×€º/4Á€ŽQáÄÏ \EÖe°”æsY狦<……‡— ³â‰Ò1§ ^Ýë Œß9ó“´fãþ“Èô¿™ÿœÎ-Ãß׳… ÛŸ,í¶Ï=sPt±!Y‰Ê®­°¹€ƒŠ$2Rä`ðÊ¡6R &C³•傉Qƒc0Ù ÑêÑkŽaâ´yA):¾üð¦&Cá×óݧ㉠0XÇKâ)$Ý*@‡|Í1Ã7¹*Е¾"ÒÜ ޼Z2{ttÍCD‰Oë €¶©Éj©;PmÓŠu¶Š±JžÄðàcÂÏiÓÚÓ&49¿KÁL”îý79¤ª!$Qž)zwKkRC® æ¹9#ua…Ýož; { Ç,¿Y·aËÇèé^Aš¿€µo>£rÛ„ó:“(`Žæ_>%Öô<èuÆŠ'äÅÊ =…àç5A¬ Á»qãPHn¾^Òp¸œ_H¡#$†èU|¢Lxß®SŸóx³÷ó¤¿xçŸòFùÖ®{ïŒCßLý¼æçˆ))@í,ƒ5R¡n94î$S2¶8„Èô*ÆâªðUíI½}Gªh໲>Ãz€iLdAnq‰YNt¤xòÌF2ÍÙÆxДÀTQÞ)ª‘âb,‡FxAÐïârØ®× 6uaZr·ä§z­FÓÛc>»„NY½Š¢,BŠÝy¿qWñ—œ§›zËš€‚Í^âMc< %ú( Øjå«:WAUNÇZLOu$DÔUSÐ^«Kƒ´äÏÖòñ³ì÷ºý¿÷š\¿zc+ãÇé½]}M÷ÿ㇪KmÛœP‚ˆ–½&o°øÍ.aá'2‹b8#o³L Ü‹X [†ÑRpƒr‡Euþ@÷ÆuBt—ϯåÿ£û' väß³˜Q ‹5ê°(dRÅ…‰Îµä-j:Ëa%2;‡FÊ.Ú¤…ˆ 2‡F¤Ç*. . 
^#‹· ÖÝÝ¡2IÇc„¬ªÐUñ®¶àó3üñaQŸ^½û8R9 ŒqA«*¯Åþl7œšMZŸpÌòvÊLÊü‡©ç½抩B½HyÙgŽD¼®cŽÀ6Ù5³u$ŠL¸í¤< é Ôõ(PÖ}¡ Ðxâ2ÀàÞx·2å…t:/pÇÇ\¸E3— x”êéžú•äþœÎÔ€OÄzð$»†}º0ì?~NÊ nazVŸüñƒ—zRáb²èœ5p'Euxx‘r¬ìŒ•"¨&—½ZG ”'N0Õ´N³,ÈJqÖ{8Yì_‡6õBt¸9y´°â‡l¡ h’tãJׄ̈`Õ|u˜ƒ’QNv æ ì± U£›ÃÖD™ <8àb8p¼™'ÓœëŒdA‘T0/Á&f9›È@¶Èò¸jq'ÐÈU$(²<D“‘²—øØåSO#Œ•T`èÁ;uT¼#ìk˜Bm0xº®*i,’ú⇧9uˆ„¾@;PŠ…c"GM<{fmø£SE×w¼®ª:©Å¨ºÅ7èF6ÔÆa|HC› X‘h9dŠ ùÿÇ>çkí?@>Þ´æü&š…è(x“§ÄŠ™ªD„l¦$“{¥XÏ 1)…ЯE•ÙÅVVEÚ–Á‚Dç‹$ ÎÃM^D™â7A¹¸˜óQéϸ^³`¾(æ0•0‹ÿAA|r¯ž`­Œ‡zëÏg#26€4ʺ4á Å”ËFAAÅŒ>Äæ/Ø–†Á¶X á\ å³gðбlÕç~a9/V¨K)M¸P" ‰’¦.…Sÿ%´ŸH»ˆHÀÝ0Í(à¬DR¡@ɪGk¤§§«v XA¨y9½úâDoÆß]ñÉæ0'A³ú„IH$ž¼Lµ‰^øÔ,ÄÏlwx±çöu4Gu#¸`Ø‚WÙÄ(yÎ1´˜#Enˆ’ÄW†‡Ýœ€Ðí夥½À#rVòXW½&&cßß ?p³Ô'|é ¤e-ÃGG³KG;šÍ+˜n¢GQìƒÉMo™Ò›áƒ •J„nVÚÔÒÍ4Èð)iKL”@ê£ÀŒ®oä 3‘`Ðar[ÄÀOv®¬7Í…íG%x""( ÔÖÒ›³ža{ÎÍ!0âÒ £¼€÷å#CTÑ ÿåÉrë#¬œmè}jré`ùñ;†ÜÑŸ2g”[À<ƒ™ˆgLÿÑè|¿ïû¿éi:4ݳ¬a×Ó*ˆT…‰Lô{f‘‚#(’!æÅU²QrZ70€Ì#†bL6å…Ž¬ÃáUh¶Õ/µ“=ÜL ×ë³Ùø;¢#šô_[ø8‡ett  é2º)™xK”sž@CÅX&³®&Ðö@¯€åíLL´}¨!Û²¾Õ!¾^C–-€PÔw‚þ9–¢¤ô÷ÈK˜Û…Œû(ô§‘˜lÁaÀ‹î0%^ÝiÌü>³\FàY¬æ´Îc›næ×¼ž»ïšÁ0ÊiÓ»4ndP¸R `VØ*Ê Š Bz0ˆ•‰ÉâžIwäFM™HÌFЬ-äy ÀŒˆoφ Ïq1·±¬ñ~£#† ¨gÐxAg|†HòâÀ]’ÄtzyS>ñµb€ `Z—GÈU¹9F,ÖÞK¨n±ÆôžÚÄtK4ÅÅ\e ;]À¸IyA†Ø†>™àAÜQ =“‹Òpà¡$*`]”ÖgŒ9•ŽÃ‚d ʤƿç3åt½qUWoô>hNVˆXbNîG!K9šDG9Ö—‡§@ZãAT¼——ó(d„T`鎛€Ø² x´ä¶T6Ù%’J n.‹Pk@»ÇÙÁ›°‚ª4«]ªS“¡nð÷¸ ™ˆ…ð3ÖdœˆùW†ßÀ°ò3ÀXŽ7h­éðÙÍëÀÓgFðT ‚h S£Ñþêì¶ù±Ïm!Y¸hÔyú€¶Hø^ ÝŠ`s:ÎÒTNÿ Ž)B$l嘺ã¾óÓÒ™íâý9EA“èTÙå#²ñYÀobì$)2àO9¼³ÂdÂ"!áNL£õ|…'üú̾¾x”«Wú(mæ3 †”ˆì“㘋†‘e—v,lˆ$˜ (&Q(iÉ\–ô@`s‚‡¥Et ô¾˜ùl¡±HE‚ַ²ˆ`ÊX0òûP\Ö´¢[ÑYèƒKº {–Ì+5d<ïªÂè–ÖPù á²Ùu„˜ƒ·ì+˜ŠtZ‡ .} ÒÑ*+A7¶ÏlAluŸ­pcÃRo »vFi8°Ô¥9¸ó¹-ðÞŒ”˜?UÓ›êÛF;ͼ¬5@áØ6†_Ë€Œɛθl´;Û%í3}<£ÁiLqaªîíOkÿYh1µÈ50Éz¹[W/V³2™„uëQ ¹åöj…M<ªžÀà¶q@1ÆýuL𯷚¶ü<ƒTò—ò ¬ÓLà¬Eƒ…ðƒâ ±€#ó÷Êp`E°yÀ¿Þq„T-cªòöF´U6c2à­9³!ï% QN’$¦ Ñ$2f^hÏ”¨sCÆg“CY}ÈLPýp:gˆqUo µ2…xe´Ë ZŸ Àh¢‡šoë~?å@¼¿~søÿã§ßñÅäKTTN åÔˆ_#m†„w()^Ð{…$NxdÕ:vFи]`ñ2uM(lç™"š8tše!ä=U/×ÌlK!yð“è  ¯”õ+ù,Ðj£R§'ˆè«ƒ•ICÙ ó7®ÝÈD†ˆ3ŒÉY_i9»©Lp"Y&œm˜¢ÇÜC¡ŽN÷*A˜C¸À÷1Ç$+FФÃãGËÖÎ!f>V—Á'½`I±3G„êCOLa¦CÁ4öþ|}â9ëÅ®õblÒdš\ ä2@OLzÝàÂ&ò*¬\zðrBno¹ƒKLŽÈá,¿LèoŽªpAÇ%yƒ„nò vöJ¼¨fïT!ÑN ÊSž hVåqÀ5Ø('cÊÅaÈ~IìSò8!óØÆØôkõ%ˆV “c' /zå¦ŒÜæ:ÎÍ+ÈwÉ"¿B3Ø(‹M-­GA†Ã<ØÛC …®úx„³wQdÙ"žD‡ Ÿ7Òðr}nÄ){òsÉï@ö a‚Éà qSKÏäÖ“5Ì[ÌØA;•NŒðÞ¿åuú‡ãÿ€Á¢þqÅXiÑ PfdÂÕPµW€#, L·xQÌ•MUŸEJd}‘^,+dÔ#¥`læçjÃ#ÌÊZ¸mŽahwéíf&œºÁl8ÚlGFIáàT,„wWëÎ3U¾1!Æž1Ez(¶Ç,6>WËPß™F/¿ LÉ€QØ+¬x±„~놱òÜ,<ƒsFò¸L2 ÙÄEF´_4]LðÝd)¼ÆF3Ë -S±$¸>`½úâ.».¹a-OO£eV^ÄØãÙHQ ÊóØ@D.0)wgˆ¾Óp–àxm¹žç'`5`²(e–mqŠxŠD›„êй¾Á3 ] ©OMÐ+¨Û$R6–SçHè;š6ßL«¡øÒy¹½˜‡ÿê…‹Ê“@˜ïyšà…qÌìdF,Éæ-i]ŸÑ:©í8Qa)3ÍÌÉ6²H›é8qúŸ™®Ô;oUÄS3þÅY'Sl ” ‡…Úy‚ñ,FùEËi50¦OÃg¾ù¥~€ë¿7}ѱ›Œù늮Ïf×þ‚øæÿøA¨Ê…© ½WN1ò¸%³E‘ÀoàÔ!2‘ ¨‘k_Íh€ 3AŽ_³ôÒ€#f^e‘îIQµ‡>{äÇÃЄ:½!Ç/Ù€#*2Ä мlÔԲ„îŃ8ÙÖ•c‚­¡¶Í¥“ÁfT Á Ru¢h$:E¨Yµ™p ÃI™Ø·lÛÀÖú´³¨/på]C…Ó%Öqãu:D3qͯ\ÌŽ™R…8C(ÂÊ—Ô0O,9åÐÁ—Ò-†0Uu{¾]’ô‡aê˜÷ŒÛ/œñKB°wÔG`Ï*™†eFèpLÐ ÕÝSäj¸ ±3K>è¼lœ‡ƒé–@Ð+²ÌÊÛ;U±ÓË® Xk ‚E[¯œŠä3ì^¤b}pk4[s>n‹t ã„wÆôQa¼ün“ !¡%Z̼ìƒèsPL.3rZª‡>±´S=_Åà€a¢öÑm¦ÃÂÄ"ïÞ\ÙdNi;ªV¢Þqáï®8„†*–öÁk*؆È(bdšfáü…Á4BŸË/k‚ðÉ«»“%3£™&_ê(ucáßá?_ðëòÿŸãž¯Ôÿßꉳ‹:c1Â-Q+Κ˜XØÙcI/²kV«ªs”NA©ï£Ù­Ò§.L9º®m¿À&í R QÜ1¶aÕ9\>@ÓB´âª=“þN‘ÌfÊ^:®ûø]–“ëã/¡Ã¸š·|#ä” ð%ùä¼B©aåËØBA{0‘¬¸k®ªãEH“LñÑ„9Šã3šÀBIZ*xf,8­V\“=jø!À0Üxa‰ ‚ÁlK(rm{mäKˆÆdÒNœ"ä<]EqÊK–´p‚ÞÙR^»Þ×1yÓ2 òwpCíô.Ö“´Øµ„³ÜƒeÃ4¢ å¨0îÊÁ:XÝr ñ›ÄÈBÄM2›WäQâîŒP‹•°2лÖVeMÈ8pSVƒJëÑP7KQ¸èç©¡Õâùc]` ”Z–KE«aÔP~ÑÌv!¬á †¢uýŬä<Ñ+Ê„ƒ×ÒÂÝêtŒk‚»šWE  !erNÍú„½8 ´r—V Ð…zaÁ$P˜™îAÃŒB™Y%$·V‡Ý81d}Äo¤µ¡óÇPVƒZÕÄugôLŒ}3ùŸ¯ü;‡ûþãÿM)¨­ñèõ8CÈcd‘I'£9rÖNhÌÀ”DÍãÃ0±¥Gê¨`,”¯ l´ŠÉw¬®×BÙ‡9à+}9ö0­}œy°(Far‰d”Áœ ÍÔöDzîK¸³±:'¢‰0>h%c¤€iÁÁ „Ñ —މ4xV++•h@´+AËnÌ3¸ (^b‘¦†J‚FÌò÷à†#²HwCêEETä\Ú4N%O´ÉŒÃÀJn™³’cüþ2À>TY0—üËZ(¸W€ð®…ÆçOÙIˆéÖIB"HHZA´/¡B;ÀÊ®~#!¬W£ð¸p¶€ƒÅ2¢ 6 ‰è"Ò°ÅJt¬âJÎpC÷D·ÿ3Çeµ’ØŠF¨±ÿUøüOWêïô»"ἋŠÂÑ8 ‰ÙH|¹ @Ð× xåã‹Ë*É|fxœxv¿ðòðÇ|+0}a«ã9í8)«€…Épõ7ªf#ư¨3ÄÄö2ÞžnA 
j»”“ñXdãŒA_o€ÆnÃŒUÇî¯l´¦‚ÇDá³uƒF„(0¡ÜÈ dOKG°Ѓs La§hÜ'Ûc´Ûiâ ;ødJO35ð‰3Á¾)ТhîmÀ`i8G®É°_(˜*¢֬§W3òŽ#佊ñ7ÁS˜3DSMc‘Fµ­ð#ó_Ï|Õ .–Wðv$ ¢ü&ñ=é£ËÀœß<"<+9šá·¤¸€D Ä‚\!#Sº-Pî^züçÖ3¨m8¸[£°ð S89Å×DÑLpTåÌ (¸„t^DÁ.NHÈ¢)æ´…bYk€+^ò@"V4êðö}W*Ê8 Ñà'Êê¦; ,)Ý“ ù¹†Y2)QE·1H#Fˆ} Y”q‘ÚFˆhAí{ž¥½|= ºü8C é¤ù¼vzÆyœA¹àl`‰ä#”åW¶çb=Œ¸dûrX~‚6‰QlIK¶6ŠnŽ7›à›bØLâŽ9gx¤f L³ °pw¼Ä‹ëP3vn¢hJ¬Å¶ªuñ,å.¹òñ­ÜÂE<ðyá1÷Â7F¤¸âaG9!VZÁL¸ØIí*ðÈ=Ñâ$6Q­îxÙ%Ï‚†;‘ÞºÕ† 2˜†8Ù̇2tDÚ¯.Jfñ{+IË@™A¨3ÃbòÄ%’IðÃ"Þ‚þ`-@*!È›Èîu «|æòSu_¶†86/0Òâ€l@t£jr8ª5XŽÊ™â>!™‡ÐíægäæŒ5_‘ô<®ékÁoÅãr-á=ìø¾<•`ƒg0¿ÀëÆssç,Õ¦ÌÓúýr_á#‰y*,Ç\Œ‚íU'1“%LGÈ‚8sÿÎÏ÷xþ‚ 4ÿ|ñö4\c@U“#Ó–Ú%تAÃjÜrA \°h8T%‰ÎVbàñV’À«( àI4H,-,ÀVbÛéQRi|0œöÑIëß1žhLðßÒÝó!Ã,!ç–|•o™É¤ºP#óiæÚ× J®ÁA.‰ÔŠzïGíé ¹/œ¥î”ÙÃpä’I0\ÁÙ€^+Û®x-µÄ1Í(n´À& òÅ­péÈMê‹ÈÝòl)ò"î ÌRŽ–©ø¾ÿNVQVG©x)Þ ±g }žI7hqo>ŠVÔt†· p´3޳\ ^ ŒŽVwÄg«œŠYáK¦#Ö³Ãt`6¥ÁŽÂ'„ %‡³ÒOLÜš¦(ÜQsZЏ%Ê¡#—@©! ˜Á<èz¼°iÅFY`ì.cc¿ïœ3'ÑKS@»q˽ôi”¹2‚ݱ™‡„2Ê(EØdÕŠ³&×ÏöÏš­‹)ØÁ²(멎½ru1V÷ ¡ddzHpt¨V¼rÑöW×Ïu ÷fy½¢‡w‘ŒË¸^2"Òx ü1Ô¼ ”AÚÀ·¿€g…xw—áÑ#£Û§ $ï5±—»ÐÓ—GõØ€yóŒšïÇ`{ÀxÌÓK8Tʉ8l@:Âb±K\1™$È˜Š·ÿOŸ÷÷þR º‡ëCñ¯“hrjž€/gŠì#ô±¹ëüÖ‰XJÛéÈ8qèHÖå5¨€‹ Z”f¬Z¨¯ËÀ­ðD‹êmËö`/2̧"‹à‘Žj-g‹‚D«ÒaK8*$*‡Ãrûü0Tc&ùÑWåÞx±è€ÂÔ}„5\9MRPò¦G”ÇhVÛ~\4B ñEvÇ€²Ã ·4NÚFGÖT·ïÏo¡ôd- :¨’˜i@ p9Ä ¢Õï§n8&Q ùPéÛÍêQ:!Š2.8nÊÀñM÷Âs¼H3èιB¸àÌò š¾+CÙÃ7l@n‰=œ§gJ+eµÞ' bA=Ò=ܨR15ê—*!²ü| ¹ï<ScáóDñß&shÊhµ<_ÝåT¶^Wʇw€0XR ÛÅØÛƒB1õ%öäÖNÏèÛXRÍï‚OrMÀ+’•{èÏ–ªÐÆÇÖqC#AB…èÇ´ðÅ”cž.Y°`Ö¡ßÉa¢Ü‚&!ÃÉ+,Ï `Å'7N¹ÈÀúzÝ^£Âå!×È™gH)¿®³ÖcåØíâÁSBðÚÞ8™PÏ¡®mÔÈW½ÊèœL»8)‰T+"xØ ­2D 2s>Ï#ZÅ2D2¹…‰ˆ!v€É*"ÔlÅ=ÿç5}þïôyWóÿç ”×~üghÜéj$˜q%ŒyÅvÍKÂá³@^8ÅžŒrÜR 笜28â1 óYäèâ€@J4B aY‚¬(lfN5R¦TÎfØÜ3ÞÔ‰8ÌzkÔ>ÀPؤ*ƒŽcãîaAÔMÈr˜%ßURø7”~vAüi×ùr`[B<1ŸNCÛÄiÑuô„©í?EX Ψ¶ í8,ô=*êõ‚óƾTò³®Œð‚Ë’x«ŸÎ^5˨í[R5’sÐ"(zh¶L¬à¬ä–¿¼¢œVCÀ RÈÅä7b´èê”Ïbñ¾5fÉŠ°µeMÔ£)^7}I{w …C§vKñ^ Ë–S ,ÖºVë§ ìÅU‹§¡Q³èšô#~å— Zé°%Û\ ÓºZ‘¼• ¾y|9Böг“\WÍO6n±~gŒj2‚—;o%õä‰K=Ô¼e:tð,Â}œÆ F9l[`–›Œ¼dÞ94⳪&,OYÓf™8YuŽ)œpìKÕàíãdÄIŒÚÁtä§Åˆ BËÐpæ¯q6·qBŒPÈéZàEóE¦y´J-“uŒª±ž 'ŸjGPqã†G½ îôBÀ!C„˜Ù3ÇÕÜšfq™E9‘ eèÐõÉ*и{R³³@«; T5r8›˜BúÚü?çùþ›½ûüó6!Öóóåÿ~ÏÂÕBA2°»7xuñ†KŒDçMå„pG@€GŸ¼.‚–°…™(‰¶8€Íâ‘o!É’L¤ôNH¨T¼8—J1:t¾b/;²€"݉PÆN,{­ÈÚ¤ƒ8E"Tà5O~ÃÝ""ÙVG#qà 8ck<ÃfWr2i¬LeþCÇ hŸY$ÎIûp•1=ˆ¡ ª‹ÉỶ&È¥˜\qÈÒF­›rîˆr,™.oÈô°zx,JJí¯ ôyXsƒXi†ìÊR”ëC±‘Œ‚Ô‰ž«Û½•<ðìFìáO¸{â3¢7Uꆛp%j“z »R™ôÑ•mÛÅgoë ]²i¸à8k`¤X0æ@dyœ÷yÙIÐáyßÉÉ•Èѧ.³Ýª ³9>%xqµÆ™àï•‹ØÎz9ÐðóÜ.ÎeQ˜*B¢‰—6eV_J'B8E±,<‰2çÄ×{‰…('êrЇH àâÝáùÀîèAõÚˆ„Á’dÖK¢’h4@¢ò5ŒH…%À„J)°ù +fÝëŠMS&E¦bbôæ0C?‚Ž–4Ž9ñ-°5Ô ð¡·A¬:Ò‚Ùt§+F"º+äì!9Ð[ !8ÁœJ“¸X2d@„!7_v×êIãözÀ»ÃƒlürŠy¿6Õ80„oƒ¡¡/ BÝ0ƒ†Ä;³ïýý?£Š¸ºïöøåF™€\õ-¢'$ © ÓÑÇ(røpdYçÏ+ ð0{¬KqŽ\teÅ …î^Ž]¤7.¨ ¡•êä f‡y–&xUT@Ú.û/-70Y",`¢óYx"Å/¨ô‰eD(¼Ç'•8„o’“GaeÅL¹v‚›2•ˆx‹ß¥á„XS˜üÀâÜüX='æŒ@5—–&C’ σ(^€˜ƒÍÏ ¥ X27Ãébš!6Ãàe€ „œ×PÀøÒ =NÏ0õLÅ Ä 'vrY5\€•xfnµc~ï^ÊXwÞš µ7b N³Õë|pÅ–ýéG‰ÛêUQ*’=‡´WCÂp{Žv”çbp9½¬›µÌ@`dçë±´qX¢É(²¼{j-ŒPHÑ€ªÃ£l†€WEH«¬±vaÇ˜× ')à™¤ G`Ú4†ÐÙŠˆ±Ž€­7x/€×‰Úº7´qÁUÎ1B ´P¼@DÒÒ¥1Ø´k •Æß`Äuê0°6(ß\˜q.í XWp¢p+‹Ä †ŽAŒ>Á8 ”¦!‡ÎdZ†2T*qÃõÎoEfCføtIÍSfW„U+š?¡˜œ=B¼ši 8&q¾wX®ÊbΫØ e3¡,ÀF“"%2 -%")o0µÁÖQÆúºi¢ws™y"è€=•ÌëÞ®¿ORËLæ›ÇÞÂ2¾”2Ö" ÇOn³Çy{“0à ž;¦J`®xf?ÊMFØÊ¯G“Î) H£~ ´¸æ´r¢ÜEj Ó7<ºX C}`ñ Ì9gƦ4 z99¡Ël aÙüÌ'ijŽTÀap`ððä÷Sš§:ËLâëcvVªAæo²»øõš—W„5R[jç5ã¥Êt )Ç$¨äGZSlD>±ÂK3²WÖ?8â¡ñÌÂH.Q§¬iÖ:Â!°žšKP4ð”¡ËX úþü6tlw´ ˜Ä%’+–bЮθõ žÔÖhŒä™qxBì'$A6À¼#h!ÝEÚáYN!kÂeú&Â@œX“#ª•¿4*qÙbHƒÁjÆÀJs:(Žóq%ú nÜ Fo€b]ÍMJaƒæÔ‘ŒçÝ$îã›õ#4W9h,Æ À¹±Ýd°cÇ=¥ID8…¬(8¹‰¶@[h%Žg%S´ƒeÓŒ[å܃>h=.ù «á =¬Šœe „BñY½c…~)J€x„w,hÉ67 +Ž3i+Éii«ìï0Ÿ~UVêö¦­“½gƒ…3µs`2iÍáó*ë1ØF€£QÁßRÆ}éÙ1¥puâã HLL‡Œ»T—1ç~P"Š“U‚ ¾BíP¹c‡Û2{³êĶ‘à!*Y›"Œñƒ8FðB—¸²ÑpÐàxb D‰gAØå:ás%2¸M‹Ôæ¿ùþxðÚÒ*À#ð?¯ ³{A½‚¬‘pèn8”»Ê3ìjowãŒéÁ< ŒßXésÁ™éËÃÌcªŠ”:G…;1¦ÐCZ98¢@f!I)»JÑ Ô°/œ×Z¥‰’5Ê9ÔV#.hḑB%°„¾•;rÓVuъͲTäœ |„VBŒ²ÌÞ(Åw¨‚vD»áE“°”•Ah`àñµa% jŠæ¢Æ8ãÙ¡ iu3 3‚œu 9)õ¿ÀÓ‚0ò«ÅjнKQ=Yš 
9…&HiL³ç§š±¾„ðºv³rOOµ]ìƒqÅŽÜ7KU²{Mp$HIp…^ËxóÁİ\¨ÇßË âð†Õ:×™7ì"l¶Œ i¼ì ( uÐ.NË8 ’:FÃÁ\+3ŠE?TÞ¾?P]›b'¹Çî€ú©óc…b( Ί½¸fo @ª"µ8À–Pg‰´K)u SØeã¾Vz<¤ Q$K9p.!6œÁŠ0í5ÂÑ¢ž´gjå°¹Zy[öË<šãQ^ò‡ßQZAˆ„H-˜Nàð÷MG³žmêáâQ‚Nà¶ 1ÌVaÕ¾ÊsŰ73šÇ)ôÃàÃ޽«Å™HJÎÁWfD¸PÚ ñ´¸ê-=¨ Þ¶sš2yoƒÄNS.d™Ø#Â]D‹*g =/ ÄB3P€À )vmš´ŠNx……¡’²j¢1Ç ¾d4ããŠ+q<$x]­_ŸàâS5› ïeN“Gøm©!‡“2àõu탴RðΖà=~N4QƒPZãy äë ªœH'U|zɪ¬n7‹’!Ê’/ h`à @6%õI†—xtÇq*JZðŒðbéûZìÃTV‡»ŒÑ£’îv(\7cß’G·|B"!ÙJ¾/re‹”1hPèDØÃ„‚-É ôÖC†Ü0hƒh (LíP,TœEZIAû Ç¥òqRŒ“Y8¥ÊFIdA¶|T¯QÁ‚î3±C"±9ÑÁ‘;âÒ…4t«XñN5åí"†?aÞø`﹘ŠGc‚qÃjâ–²M)Ô pŒD&RÀRYqP! +â{ ´ãÛ~ï ·š>Ó„àŠJfEd¦c·i=°Ð¬ŽC9Ó]é4F‘S޹)ïHJèBÖpE†§ŸKu—~0ý3ÄÄ…ì.Yo(H ¼ YjcäqY²"ÂГ¹+ˆfës²rö‘ôÖAX2—Sɧ/r/hÈ/†à»D¤eã"Ù½9¼ü;ýœ‰ Áª'µ\•æÁFgt6áC^—P/5 @Ý#qŠð@~I†ü)gXå· ]ìÆtƽœE £€ì3µ3óǧ€4ô ˆct{¡£¾aèq@½÷FL‰ö9^0.祑Ï:É+õDq—!Ã5ì²QmÂÍ:¢uœÏÄ<ÃÂyÊ0•ì³ç˜|´Àf¸ÎëÆ¯Ïðrë1DN‹óÊ“|%XADœ~[s‚ÈA‘ûTE–3€«r†Žëd¨¨ÛRp‚A)ɪS‚Kx«ßGÕ´Õe®!wðU Ë Ã‹æâhÓ*Á Èë)PÆY¢r~楛}´à-yƒ^ªºûNÞѲs Øä“€î•C.Tû»Üp Îð•Õë,Þ†ƒ‘Ž,¶¢$J*T9(¢„ {@C„(Ó5ÐiÃ$.*÷¸,×Çd¢ë“´‹7x{S š•cuUÆX㉱‘” $©¹0È\Ì«v@ ê'ÁÅ­>×9”ɸGËòܳQ¢¬-èqÊüº\æ4ur^)&‘V.‡ÙlñZQt7 óYW"V<ã2æöK`têL4@ß§FzC^£—a€(½BÜ ²ÅŒpd^ ‡¼rhSåœÌß‚ Tsœ´8¨‘¥1,0Ùðݶ8œ5¯WI‰IŒ(Ü!=p&,ø@Í4 R8â—eÚ/>þ0qßÎonBeƒâ5ar¸ä‹Erèê°úryç‹|õ¢' ±Ò ÇÍGM¸»±Á~[EŠIJÄj[E!£\>™Éz…B µ"ªÒ'$v²Ó{§ƒ¾R…ÁWaŠÕ—‡†*xÊŒ^ÉZs‘BÔ"ãúVqǚΉ¦èü¤á4zwÈÎwO±œá`c-pÖ£!‘ Ð!tƒ¾ãŒ ã€3øv€G–Ó\EÝIÙ\YFI«óüc( T† *À:ã#q ۉ˹I×;¦ù5 á@^ ùTž_DÅÃÓ†-à|¢±ÞôÎñ4©ø" Ú<…X`{L0p4Qf»m"Zb¡žuû˜C€dåø'gu¹Y»:ƒ K”®`¬à9c$òht؆Å–1°ätáPzs‚™”9@`Ÿ#ás¶rßžž¹‚êÃ/bÖ…Ž'RHP¬tŽ¿Ö0±æœ‚VK t°„hntô ㈔Š1÷°¥èC€ƒ­m‰«Ì0“[¯K–3À´%Í,ª€pXg-)F*Ã"3΃b)$WJ‘ÞxÂiPÝ`/ ñ+Öšœ7<Ž{nXGˆ7®ù©¥vX€7M…V#JŸsߘ  G EuˆQkÊ 5T©ËÃŒpëÐÌ¥ûÕCòk¯‰Áˆ õçŠÅÓ—œh\‰#Ë(>”ð°—4^2<Ä.Í:ö°cŠëËnÀg+Ö!¨$ Å×1Ÿ:¢jß²ŽZ™àŸq°´È,Ò+ÄcDoB˜Ì^ µVv€c QòaÄ`©ˆÁªø!àÓƒÁS§+–pŒž4¤åÑѫ؇`ñй¨xäϾ"q›ªÐ/ÁÚL©f´ûÍ ×ó' d¶õ« mà¿ÇÅ`2gÉ ¦\2p=É(¢±3*°`¡@Á,|ÁDCÄÎ8ð^ûŽ3±rFáæ8úD¹늹ÝQYn®OlG•4§ßDA(Ž"là  Õ@¹)JWÂf!×;債ç7Ç–xY{¸ ¯Q2ÊõÂ@Û¸”cM¡pƒ+¸Ë¥"Fãׇh)­Ø¼\‘ÄÊYÝ„‚c…ss2q¶5 ‡íy áóŒr9)ðÁš…›ÙóÃÕ£{FgP±èpi¦ÌzS.ò¼”ZM½Š:p‘¼ûéH A®5ÅÇ:nèP~7^3:Ïü™k)×·¾"0æC²ÇCˆíµT s¤9TE™„ÕË.0óá`*;3–… Q*¹jP ¤(öwÕ䡹ÍXÛ °N0è«¿Pk/DzöŽÂÈÍ¡‘!UœbTõÚŒMS4 'HGlOlJ‚¸dQŒùp1’ဋæ9Åë[ÂIô¬éõ®K1[U–Aéx6Àu“¸üY‹¨“±†ÃKjxf•„Áë1;MÌÃfä8†sÀ/ Ìa±çµ:O“3“ªþꔘ Pn+ÚCF0ÁÍ­ìk¹ÚÌT˜U>Šlgaœz{¢•KÁf ’‚âÿp^†1OȽDèŽó¥oKØÐìØWá/Rã€!•uÀc°®,‰ ´ÂœGn;qQ¡aLŽ…Áw÷æz&$Ó÷Žº…2At¼Èíg| ¶Ì¥ˆ³ ÆV(ÊfU`ÍB>Ü!¯$^½2ï5…¦á Å¢AÙ[‡å‹‡'ÊŽ-;’O<¯AZ'pV'bT.Z ð<Ï%zÏ“óýh”Â#*š˜¼Gún“C°Î—Ž…pÈ/‘Ëmʈy¡y¹Xd{š°Èœ\ ‡²ŠÇ5ói›q ¡f„^!o©u‰ ÜSŽ.o]…×9Ó‡÷ kÃËßyâ:Nñ<Ǿ1^oÀZdÿî5ÁD„TvèÓ çcŸ„“¨´s¯£wå-"Ðu…×ó° ßÀÎÌuÕÃ2ÿ¯ÛSѽŠ•DM9šÃ†ÂíiWwñÆÐ°I{&\1üÐ5%Ôv9"%Ú¼è;¸c†€ãŸJŠJ”Ëf8Ë·ŽÐ aŒ@æln&STõÎÆÃn¸Á fÇR„Î+§twžcz"íÑ™ÌÛ(9´RÔ9/0.£øÍÁtÞt äiY Ž\¸Cw’›ê3éó 5 ãÚõDáÁ$¬‘E˜ œ qÑ ÐT\bÓ 2Ø©e™!a mC<¾—ɹP  G˜œÃ²`^ØeÎe>R-´’‘Èr= ä 6ÉÚ¼!×Üu2ipvŽ9'“v(Côbž_•{D>¬šrt°XœŒ‘¥#vã ÃYp»c¢ °ÊÉÔ?˜ä¨0Ã|"æÇ‚„ ·e¬¹R HtBxò¸ã‹²LÐ4K)†Žª8B©ˆÜöÐ×¼2Ñ2Jvm’`@,¼À1î#ÒÀàð`R_4žyÑÇ«d“;\l³‘Äl`„ý{8‡bWßÓ_×õºä±"ìO®øtýþüoߎë…g³Å)â/B—±ÔE úN¢P ¸0Ú‡l)¯LØÜ¾È+noÀ‡Ö¸Ž(ûË¢‚8 bDTÐjs€\ÁçK“êRs;l¯ C’f¾z' üæ¡D4†S×A°)-,¹˜ãARžØÔ…Èì&PÞ UGlF8l#Aà¥òkòã }>\,0•Óg‰uf MÒ;)< 1õª}CqÁRAž`Á[#®¹0­¢ŒLC H!dB"ü…pÊ)Â컦BÌ/ìù9O œGK¾dòÆÛl)y°pí@ÍÄO ò.LØ»5Ÿ…|)Œd `ú¼†Dë+0¸äãTv#Áäà>¤fGíI‘Û45Åtæ¬EÉÏùj>Æ”œÃ] ¦XøÜ8wC;3 5…Dg¨A¯v·=[Ár#¦pî8=”e‹+‚:>¼ð5vQÛ˜œ%)¥õƒÅš…}ˆ#һƸó¾&:I¦{™£–Ñç Ø‹Ÿ.U0Ò˜ËBpj(ÿ.аèz¼î´æS&2TD»A%€$ÐŽ˜ZÔ1Ô0ÃSƒbŸ£dË4Ç+H³s@Atðxt/`0IÍÁ*æßW2òŸÏŒªG_ªÇ Ê€.(±eÈq ½£ãK×ê@¶cª;ƒûð`nEÅ© æÏ©ýuýÖG4qGLzàê.Gö¢2ârm ½"b‘’œ.bØ ò¾€28Ã6Èèzg`ðœjò0b“è㬤Igã°àãÌk¸ÚÍwäÂ6z†·ÔvÏœB -F—r¯bÙ¶ >y”EI¯•: Ì=«`Uå!ן"F M›¶LÚ8sRͦ’2‹Ÿ.lÆPŒ?hh) ¾^!vYµ°=8À­õÈw¸vŒ¶ò£ ¡w‚-<’\Êgέ©úš¸á¬B ‘ÀAV˜Z ¤u¯Àˆ(ÍáÖlNªdfž!Ѩ½éJv©ÑœB%è B¤!×Hq¸yÖ¶aÃ<›~ʽ€AB³ŸŸb¨;a9Óž bÖždçG½œ²•è]›OÃ<{’E ŸMñ Ú§(ć¼€7á+²õ$*  ñœ hÒ×Jw ˆ„ðYöðn—€¦*ȤH©®ó& º© Øaì>¸¾ÉܶÇHµò$øÓÓ‚,¡AÕa|6·§hª¢‘ˆªØ`´Ø(쓈ãA!ÛŒ_‡«Vh=¥Fz®è<¿^³mŸp~^™õƒ$@=@;—p‰Ì §¡ä`þìô Z 
mod_wsgi-5.0.0/package.sh

#!/bin/bash

set -eo pipefail

rm -rf build dist
rm -f pyproject.toml

pip install setuptools

python setup.py sdist

ln -s pyproject.toml.in pyproject.toml

python setup.py sdist

rm -f pyproject.toml

mod_wsgi-5.0.0/pyproject.toml.in

[build-system]
requires = ["setuptools>=40.8.0", "wheel", "mod_wsgi-httpd==2.4.54.1"]
build-backend = "setuptools.build_meta:__legacy__"

mod_wsgi-5.0.0/scripts/

mod_wsgi-5.0.0/scripts/run-single-test.sh

#!/bin/bash

END=$((SECONDS+15))

mod_wsgi-express setup-server tests/environ.wsgi \
    --server-root httpd-test --log-level info

trap "httpd-test/apachectl stop" EXIT

touch httpd-test/error_log

tail -f httpd-test/error_log &

httpd-test/apachectl start

while [ ! -f httpd-test/httpd.pid ]; do
    if [ $SECONDS -gt $END ]; then
        echo 'Failed'
        exit 1
    fi
    echo 'Waiting...'
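    # Wait a second before polling for httpd-test/httpd.pid again.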
    sleep 1
done

sleep 2

curl --silent --verbose --fail --show-error http://localhost:8000

mod_wsgi-5.0.0/setup.py

from __future__ import print_function

import os
import sys
import fnmatch
import subprocess
import tarfile
import shutil
import stat
import re

try:
    from urllib.request import urlretrieve
except ImportError:
    from urllib import urlretrieve

from setuptools import setup
from setuptools.extension import Extension

from sysconfig import get_config_var as get_python_config
from sysconfig import get_path as get_python_lib

# First work out what all the available source code files are that should
# be compiled.

source_files = [os.path.join('src/server', name) for name in
        os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
        'src/server')) if fnmatch.fnmatch(name, '*.c')]

# Work out all the Apache specific compilation flags. This is done using
# the standard Apache apxs command unless we are installing our own build
# of Apache. In that case we use Python code to do the equivalent of apxs
# as apxs will not work due to paths not matching where it was installed.

def find_program(names, default=None, paths=[]):
    for name in names:
        for path in os.environ['PATH'].split(':') + paths:
            program = os.path.join(path, name)
            if os.path.exists(program):
                return program
    return default

APXS = os.environ.get('APXS')

WITH_HTTPD_PACKAGE = False

if APXS is None:
    APXS = find_program(['mod_wsgi-apxs'],
            paths=[os.path.dirname(sys.executable)])
    if APXS is not None:
        WITH_HTTPD_PACKAGE = True

if APXS is None:
    APXS = find_program(['mod_wsgi-apxs', 'apxs2', 'apxs'], 'apxs',
            ['/usr/sbin', os.getcwd()])
elif not os.path.isabs(APXS):
    APXS = find_program([APXS], APXS, ['/usr/sbin', os.getcwd()])

WITHOUT_APXS = False
WITH_WINDOWS_APACHE = None
WITH_MACOSX_APACHE = None

if not os.path.isabs(APXS) or not os.access(APXS, os.X_OK):
    WITHOUT_APXS = True

if WITHOUT_APXS and os.name == 'nt':
    APACHE_ROOTDIR = os.environ.get('MOD_WSGI_APACHE_ROOTDIR')
    if APACHE_ROOTDIR:
        if os.path.exists(APACHE_ROOTDIR):
            WITH_WINDOWS_APACHE = APACHE_ROOTDIR
        else:
            raise RuntimeError('The Apache directory %r does not exist.' %
                    APACHE_ROOTDIR)
    else:
        if os.path.exists('c:\\Apache24'):
            WITH_WINDOWS_APACHE = 'c:\\Apache24'
        elif os.path.exists('c:\\Apache22'):
            WITH_WINDOWS_APACHE = 'c:\\Apache22'
        elif os.path.exists('c:\\Apache2'):
            WITH_WINDOWS_APACHE = 'c:\\Apache2'
        else:
            raise RuntimeError('No Apache installation can be found. Set the '
                    'MOD_WSGI_APACHE_ROOTDIR environment to its location.')

elif WITHOUT_APXS and sys.platform == 'darwin':
    WITH_MACOSX_APACHE = '/Applications/Xcode.app'

if WITHOUT_APXS and not WITH_WINDOWS_APACHE and not WITH_MACOSX_APACHE:
    raise RuntimeError('The %r command appears not to be installed or '
            'is not executable. Please check the list of prerequisites '
            'in the documentation for this package and install any '
            'missing Apache httpd server packages.'
            % APXS)

if WITH_WINDOWS_APACHE:
    def get_apxs_config(name):
        if name == 'INCLUDEDIR':
            return WITH_WINDOWS_APACHE + '/include'
        elif name == 'BINDIR':
            return WITH_WINDOWS_APACHE + '/bin'
        elif name == 'LIBEXECDIR':
            return WITH_WINDOWS_APACHE + '/modules'
        elif name == 'PROGNAME':
            return 'httpd.exe'
        else:
            return ''

    def get_apr_includes():
        return ''

    def get_apu_includes():
        return ''

elif WITH_MACOSX_APACHE:
    def get_apxs_config(name):
        if name == 'BINDIR':
            return '/usr/bin'
        elif name == 'SBINDIR':
            return '/usr/sbin'
        elif name == 'LIBEXECDIR':
            return '/usr/libexec/apache2'
        elif name == 'PROGNAME':
            return 'httpd'
        elif name == 'SHLIBPATH_VAR':
            return 'DYLD_LIBRARY_PATH'
        else:
            return ''

    def get_apr_includes():
        return ''

    def get_apu_includes():
        return ''

else:
    def get_apxs_config(query):
        p = subprocess.Popen([APXS, '-q', query],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()

    def get_apr_includes():
        if not APR_CONFIG:
            return ''
        p = subprocess.Popen([APR_CONFIG, '--includes'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()

    def get_apu_includes():
        if not APU_CONFIG:
            return ''
        p = subprocess.Popen([APU_CONFIG, '--includes'],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if isinstance(out, bytes):
            out = out.decode('UTF-8')
        return out.strip()

INCLUDEDIR = get_apxs_config('INCLUDEDIR')

CPPFLAGS = get_apxs_config('CPPFLAGS').split()
CFLAGS = get_apxs_config('CFLAGS').split()

EXTRA_INCLUDES = get_apxs_config('EXTRA_INCLUDES').split()
EXTRA_CPPFLAGS = get_apxs_config('EXTRA_CPPFLAGS').split()
EXTRA_CFLAGS = get_apxs_config('EXTRA_CFLAGS').split()

APR_CONFIG = get_apxs_config('APR_CONFIG')
APU_CONFIG = get_apxs_config('APU_CONFIG')

# Make sure that 'apr-1-config' exists. If it doesn't we may be running
# on MacOS X Sierra, which has decided to not provide either it or the
# 'apu-1-config' script and otherwise completely broken 'apxs'. In that
# case we manually set the locations of the Apache and APR header files.

if (not os.path.exists(APR_CONFIG) and
        os.path.exists('/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk')):
    INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apache2'
    APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/apr-1']
    APU_INCLUDES = []
elif (not os.path.exists(APR_CONFIG) and
        os.path.exists('/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk')):
    INCLUDEDIR = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apache2'
    APR_INCLUDES = ['-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/swift-migrator/sdks/MacOSX.sdk/usr/include/apr-1']
    APU_INCLUDES = []
else:
    APR_INCLUDES = get_apr_includes().split()
    APU_INCLUDES = get_apu_includes().split()

if not os.path.exists(APR_CONFIG) and not INCLUDEDIR:
    if sys.platform == 'darwin':
        # Likely no Xcode application installed or location of SDK in
        # Xcode has changed with a new release of Xcode application.
        raise RuntimeError('No Apache installation can be found, do you '
                'have the full Apple Xcode installed. It is not enough to '
                'have just the xcode command line tools installed.')
    else:
        # Set INCLUDEDIR just to avoid having an empty path. Probably
        # should raise an exception here.
        INCLUDEDIR = '/usr/include'

# Write out apxs_config.py which caches various configuration related to
# Apache. For the case of using our own Apache build, this needs to
# calculate values dynamically based on where binaries were installed.
# This is necessary as on OpenShift the virtual environment gets copied
# for each gear to a different path. We can't therefore rely on a hard
# coded path.

BINDIR = get_apxs_config('BINDIR')
SBINDIR = get_apxs_config('SBINDIR')

PROGNAME = get_apxs_config('PROGNAME')

MPM_NAME = get_apxs_config('MPM_NAME')
LIBEXECDIR = get_apxs_config('LIBEXECDIR')
SHLIBPATH_VAR = get_apxs_config('SHLIBPATH_VAR')

APXS_CONFIG_TEMPLATE = """
import os
import posixpath

WITH_HTTPD_PACKAGE = %(WITH_HTTPD_PACKAGE)r

if WITH_HTTPD_PACKAGE:
    from mod_wsgi_packages.httpd import __file__ as PACKAGES_ROOTDIR
    PACKAGES_ROOTDIR = posixpath.dirname(PACKAGES_ROOTDIR)
    BINDIR = posixpath.join(PACKAGES_ROOTDIR, 'bin')
    SBINDIR = BINDIR
    LIBEXECDIR = posixpath.join(PACKAGES_ROOTDIR, 'modules')
    SHLIBPATH = posixpath.join(PACKAGES_ROOTDIR, 'lib')
else:
    BINDIR = '%(BINDIR)s'
    SBINDIR = '%(SBINDIR)s'
    LIBEXECDIR = '%(LIBEXECDIR)s'
    SHLIBPATH = ''

MPM_NAME = '%(MPM_NAME)s'
PROGNAME = '%(PROGNAME)s'

SHLIBPATH_VAR = '%(SHLIBPATH_VAR)s'

if os.path.exists(posixpath.join(SBINDIR, PROGNAME)):
    HTTPD = posixpath.join(SBINDIR, PROGNAME)
elif os.path.exists(posixpath.join(BINDIR, PROGNAME)):
    HTTPD = posixpath.join(BINDIR, PROGNAME)
else:
    HTTPD = PROGNAME

if os.path.exists(posixpath.join(SBINDIR, 'rotatelogs')):
    ROTATELOGS = posixpath.join(SBINDIR, 'rotatelogs')
elif os.path.exists(posixpath.join(BINDIR, 'rotatelogs')):
    ROTATELOGS = posixpath.join(BINDIR, 'rotatelogs')
else:
    ROTATELOGS = 'rotatelogs'
"""

with open(os.path.join(os.path.dirname(__file__),
        'src/server/apxs_config.py'), 'w') as fp:
    print(APXS_CONFIG_TEMPLATE % dict(
            WITH_HTTPD_PACKAGE=WITH_HTTPD_PACKAGE,
            BINDIR=BINDIR,
            SBINDIR=SBINDIR,
            LIBEXECDIR=LIBEXECDIR,
            MPM_NAME=MPM_NAME,
            PROGNAME=PROGNAME,
            SHLIBPATH_VAR=SHLIBPATH_VAR), file=fp)

# Work out location of Python library and how to link it.
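# On Windows the link is against the Python import library under the
# installation's 'libs' directory plus the Apache import libraries. On
# other platforms the linker flags below come from sysconfig: the config
# directory (and the Python LIBDIR when it differs from Apache's) is added
# as a search path, and -lpython uses the LDVERSION name unless a static
# libpythonX.Y.a is found, in which case the plain version name is used.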
PYTHON_VERSION = get_python_config('VERSION')

if os.name == 'nt':
    if hasattr(sys, 'real_prefix'):
        PYTHON_LIBDIR = sys.real_prefix
    elif hasattr(sys, 'base_prefix'):
        PYTHON_LIBDIR = sys.base_prefix
    else:
        PYTHON_LIBDIR = get_python_config('BINDIR')

    PYTHON_LDFLAGS = []
    PYTHON_LDLIBS = ['%s/libs/python%s.lib' % (PYTHON_LIBDIR, PYTHON_VERSION),
            '%s/lib/libhttpd.lib' % WITH_WINDOWS_APACHE,
            '%s/lib/libapr-1.lib' % WITH_WINDOWS_APACHE,
            '%s/lib/libaprutil-1.lib' % WITH_WINDOWS_APACHE,
            '%s/lib/libapriconv-1.lib' % WITH_WINDOWS_APACHE]

else:
    PYTHON_LDVERSION = get_python_config('LDVERSION') or PYTHON_VERSION

    PYTHON_LIBDIR = get_python_config('LIBDIR')
    APXS_LIBDIR = get_apxs_config('LIBDIR')

    PYTHON_CFGDIR = get_python_lib('platstdlib') + '/config'

    if PYTHON_LDVERSION and PYTHON_LDVERSION != PYTHON_VERSION:
        PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, PYTHON_LDVERSION)

    if not os.path.exists(PYTHON_CFGDIR):
        PYTHON_CFGDIR = '%s-%s' % (PYTHON_CFGDIR, sys.platform)

    PYTHON_LDFLAGS = ['-L%s' % PYTHON_CFGDIR]

    if PYTHON_LIBDIR != APXS_LIBDIR:
        PYTHON_LDFLAGS.insert(0, '-L%s' % PYTHON_LIBDIR)

    PYTHON_LDLIBS = ['-lpython%s' % PYTHON_LDVERSION]

    if os.path.exists(os.path.join(PYTHON_LIBDIR,
            'libpython%s.a' % PYTHON_VERSION)):
        PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]

    if os.path.exists(os.path.join(PYTHON_CFGDIR,
            'libpython%s.a' % PYTHON_VERSION)):
        PYTHON_LDLIBS = ['-lpython%s' % PYTHON_VERSION]

# Create the final set of compilation flags to be used.

INCLUDE_DIRS = [INCLUDEDIR]
EXTRA_COMPILE_FLAGS = (EXTRA_INCLUDES + CPPFLAGS + EXTRA_CPPFLAGS +
        CFLAGS + EXTRA_CFLAGS + APR_INCLUDES + APU_INCLUDES)
EXTRA_LINK_ARGS = PYTHON_LDFLAGS + PYTHON_LDLIBS

# Force adding of LD_RUN_PATH for platforms that may need it.

LD_RUN_PATHS = []

if os.name != 'nt':
    LD_RUN_PATH = os.environ.get('LD_RUN_PATH', '')
    LD_RUN_PATHS = [PYTHON_CFGDIR]
    if PYTHON_LIBDIR != APXS_LIBDIR:
        LD_RUN_PATHS.insert(0, PYTHON_LIBDIR)
    LD_RUN_PATH += ':' + ':'.join(LD_RUN_PATHS)
    LD_RUN_PATH = LD_RUN_PATH.lstrip(':')

    os.environ['LD_RUN_PATH'] = LD_RUN_PATH

# On MacOS X, recent versions of Apple's Apache do not support compiling
# Apache modules with a target older than 10.8. This is because it
# screws up Apache APR % formats for apr_time_t, which breaks daemon
# mode queue time. Force the target to be 10.8 or newer for now if Python
# installation supports older versions. This means that things will not
# build for older MacOS X versions. Deal with these when they occur.

if sys.platform == 'darwin':
    target = os.environ.get('MACOSX_DEPLOYMENT_TARGET')

    if target is None:
        target = get_python_config('MACOSX_DEPLOYMENT_TARGET')

    if target:
        target_version = tuple(map(int, target.split('.')))
        #assert target_version >= (10, 8), \
        #        'Minimum of 10.8 for MACOSX_DEPLOYMENT_TARGET'
        if target_version < (10, 8):
            os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.8'

# Now add the definitions to build everything.
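# On Windows the extension is simply named mod_wsgi; on other platforms the
# name carries the Python major/minor version (for example mod_wsgi-py312),
# so extensions built against different Python versions are distinguishable.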
if os.name == 'nt':
    extension_name = 'mod_wsgi.server.mod_wsgi'
else:
    extension_name = 'mod_wsgi.server.mod_wsgi-py%s%s' % sys.version_info[:2]

extension = Extension(extension_name, source_files,
        include_dirs=INCLUDE_DIRS, extra_compile_args=EXTRA_COMPILE_FLAGS,
        extra_link_args=EXTRA_LINK_ARGS, runtime_library_dirs=LD_RUN_PATHS)

def _documentation():
    result = []
    prefix = 'docs/_build/html'
    for root, dirs, files in os.walk(prefix, topdown=False):
        for name in files:
            if root == prefix:
                result.append(os.path.join(root[len(prefix):], name))
            else:
                result.append(os.path.join(root[len(prefix)+1:], name))
    return result

def _version():
    path = 'src/server/wsgi_version.h'
    pattern = r'#define MOD_WSGI_VERSION_STRING "(?P<version>[^"]*)"'
    with open(path, 'r') as fp:
        match = re.search(pattern, fp.read(), flags=re.MULTILINE)
        return match.group('version')

# Final check to make sure a shared library for Python does actually
# exist. Warn if one doesn't as we really want a shared library.

SHARED_LIBRARY_WARNING = """
WARNING: The Python installation you are using does not appear to have
been installed with a shared library, or in the case of MacOS X, as a
framework. Where these are not present, the compilation of mod_wsgi may
fail, or if it does succeed, will result in extra memory being used by
all processes at run time as a result of the static library needing to
be loaded in its entirety to every process. It is highly recommended
that you reinstall the Python installation being used from source code,
supplying the '--enable-shared' option to the 'configure' script when
configuring the source code prior to building and installing it.
"""

if os.name != 'nt':
    if (not get_python_config('Py_ENABLE_SHARED') and
            not get_python_config('PYTHONFRAMEWORK')):
        print(SHARED_LIBRARY_WARNING)

# Now finally run setuptools.
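# Before the setup() call: an illustrative aside, not part of the
# original setup.py, showing how the _version() helper above pulls the
# version string out of wsgi_version.h. The header line used here is a
# sample written in the same format as the real file, not read from it.

import re

_sample = '#define MOD_WSGI_VERSION_STRING "5.0.0"'
_pattern = r'#define MOD_WSGI_VERSION_STRING "(?P<version>[^"]*)"'
print(re.search(_pattern, _sample).group('version'))   # prints 5.0.0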
package_name = 'mod_wsgi' long_description = open('README.rst').read() standalone = os.path.exists('pyproject.toml') if standalone: package_name = 'mod_wsgi-standalone' long_description = open('README-standalone.rst').read() setup(name = package_name, version = _version(), description = 'Installer for Apache/mod_wsgi.', long_description = long_description, author = 'Graham Dumpleton', author_email = 'Graham.Dumpleton@gmail.com', maintainer = 'Graham Dumpleton', maintainer_email = 'Graham.Dumpleton@gmail.com', url = 'https://www.modwsgi.org/', project_urls = { 'Documentation': 'https://modwsgi.readthedocs.io/', 'Source': 'https://github.com/GrahamDumpleton/mod_wsgi', 'Tracker': 'https://github.com/GrahamDumpleton/mod_wsgi/issues', }, license = 'Apache License, Version 2.0', platforms = [], download_url = None, classifiers = [ 'Development Status :: 6 - Mature', 'License :: OSI Approved :: Apache Software License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System :: POSIX :: BSD', 'Operating System :: POSIX :: Linux', 'Operating System :: POSIX :: SunOS/Solaris', 'Programming Language :: Python', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Topic :: Internet :: WWW/HTTP :: WSGI', 'Topic :: Internet :: WWW/HTTP :: WSGI :: Server' ], keywords = 'mod_wsgi wsgi apache', packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management', 'mod_wsgi.server.management.commands', 'mod_wsgi.docs', 'mod_wsgi.images'], package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html', 'mod_wsgi.images': 'images'}, package_data = {'mod_wsgi.docs': _documentation(), 'mod_wsgi.images': ['snake-whiskey.jpg']}, ext_modules = [extension], entry_points = { 'console_scripts': ['mod_wsgi-express = mod_wsgi.server:main'],}, zip_safe = False, install_requires = standalone and ['mod_wsgi-httpd==2.4.54.1'] or [], python_requires='>=3.8', ) mod_wsgi-5.0.0/src/000077500000000000000000000000001452636074700141315ustar00rootroot00000000000000mod_wsgi-5.0.0/src/__init__.py000066400000000000000000000001021452636074700162330ustar00rootroot00000000000000import pkgutil __path__ = pkgutil.extend_path(__path__, __name__) mod_wsgi-5.0.0/src/server/000077500000000000000000000000001452636074700154375ustar00rootroot00000000000000mod_wsgi-5.0.0/src/server/__init__.py000066400000000000000000004154401452636074700175600ustar00rootroot00000000000000from __future__ import print_function, division, absolute_import import os import sys import shutil import subprocess import optparse import math import signal import threading import atexit import types import re import pprint import time import traceback import locale import inspect import getpass import tempfile import copy import posixpath try: import Queue as queue except ImportError: import queue from . 
import apxs_config _py_version = '%s%s' % sys.version_info[:2] _py_soabi = '' _py_soext = '.so' _py_dylib = '' try: import sysconfig _py_soabi = sysconfig.get_config_var('SOABI') _py_soext = sysconfig.get_config_var('EXT_SUFFIX') if _py_soext is None: _py_soext = sysconfig.get_config_var('SO') if (sysconfig.get_config_var('WITH_DYLD') and sysconfig.get_config_var('LIBDIR') and sysconfig.get_config_var('LDLIBRARY')): _py_dylib = posixpath.join(sysconfig.get_config_var('LIBDIR'), sysconfig.get_config_var('LDLIBRARY')) if not os.path.exists(_py_dylib): _py_dylib = '' except ImportError: pass MOD_WSGI_SO = 'mod_wsgi-py%s%s' % (_py_version, _py_soext) MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO) if not os.path.exists(MOD_WSGI_SO) and _py_soabi: MOD_WSGI_SO = 'mod_wsgi-py%s.%s%s' % (_py_version, _py_soabi, _py_soext) MOD_WSGI_SO = posixpath.join(posixpath.dirname(__file__), MOD_WSGI_SO) if not os.path.exists(MOD_WSGI_SO) and os.name == 'nt': MOD_WSGI_SO = 'mod_wsgi%s' % sysconfig.get_config_var('EXT_SUFFIX') MOD_WSGI_SO = os.path.join(os.path.dirname(__file__), MOD_WSGI_SO) MOD_WSGI_SO = MOD_WSGI_SO.replace('\\', '/') def where(): return MOD_WSGI_SO def default_run_user(): if os.name == 'nt': return '#0' try: import pwd uid = os.getuid() return pwd.getpwuid(uid).pw_name except KeyError: return '#%d' % uid def default_run_group(): if os.name == 'nt': return '#0' try: import pwd uid = os.getuid() entry = pwd.getpwuid(uid) except KeyError: return '#%d' % uid try: import grp gid = entry.pw_gid return grp.getgrgid(gid).gr_name except KeyError: return '#%d' % gid def find_program(names, default=None, paths=[]): for name in names: for path in os.environ['PATH'].split(':') + paths: program = posixpath.join(path, name) if os.path.exists(program): return program return default def find_mimetypes(): if os.name == 'nt': return posixpath.join(posixpath.dirname(posixpath.dirname( apxs_config.HTTPD)), 'conf', 'mime.types') else: import mimetypes for name in mimetypes.knownfiles: if os.path.exists(name): return name else: return '/dev/null' SHELL = find_program(['bash', 'sh'], ['/usr/local/bin']) APACHE_GENERAL_CONFIG = """ LoadModule version_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_version.so' ServerName %(host)s ServerRoot '%(server_root)s' PidFile '%(pid_file)s' = 2.4> DefaultRuntimeDir '%(server_root)s' ServerTokens ProductOnly ServerSignature Off User ${MOD_WSGI_USER} Group ${MOD_WSGI_GROUP} Listen %(host)s:%(port)s Listen %(port)s LockFile '%(server_root)s/accept.lock' = 2.4> LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so' = 2.4> LoadModule mpm_event_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_event.so' LoadModule mpm_worker_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_worker.so' LoadModule mpm_prefork_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mpm_prefork.so' LoadModule http2_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_http2.so' = 2.4> LoadModule access_compat_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_access_compat.so' LoadModule unixd_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_unixd.so' LoadModule authn_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authn_core.so' LoadModule authz_core_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_core.so' LoadModule authz_host_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_host.so' LoadModule mime_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_mime.so' LoadModule rewrite_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_rewrite.so' LoadModule alias_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_alias.so' LoadModule dir_module 
'${MOD_WSGI_MODULES_DIRECTORY}/mod_dir.so' LoadModule env_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_env.so' LoadModule headers_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_headers.so' LoadModule filter_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_filter.so' LoadModule autoindex_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_autoindex.so' = 2.2.15> LoadModule reqtimeout_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_reqtimeout.so' LoadModule deflate_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_deflate.so' LoadModule auth_basic_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_basic.so' LoadModule auth_digest_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_auth_digest.so' LoadModule authz_user_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_authz_user.so' LoadModule proxy_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy.so LoadModule proxy_http_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_proxy_http.so Loadmodule php5_module '${MOD_WSGI_MODULES_DIRECTORY}/libphp5.so' AddHandler application/x-httpd-php .php LoadFile '%(python_dylib)s' LoadModule wsgi_module '%(mod_wsgi_so)s' LoadModule status_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_status.so' LoadModule cgid_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgid.so' LoadModule cgi_module '${MOD_WSGI_MODULES_DIRECTORY}/mod_cgi.so' DefaultType text/plain TypesConfig '%(mime_types)s' HostnameLookups Off MaxMemFree 64 Timeout %(socket_timeout)s ListenBacklog %(server_backlog)s Protocols h2 h2c http/1.1 = 2.2.15> RequestReadTimeout %(request_read_timeout)s LimitRequestBody %(limit_request_body)s AllowOverride None Order deny,allow Deny from all = 2.4> Require all denied WSGIPythonHome '%(python_home)s' WSGIVerboseDebugging '%(verbose_debugging_flag)s' WSGISocketPrefix %(socket_prefix)s/wsgi WSGISocketPrefix %(server_root)s/wsgi WSGISocketRotation Off MaxConnectionsPerChild %(maximum_requests)s WSGIDestroyInterpreter Off WSGIDestroyInterpreter On WSGIRestrictEmbedded On WSGIDaemonProcess %(host)s:%(port)s \\ display-name='%(daemon_name)s' \\ home='%(working_directory)s' \\ processes=%(processes)s \\ threads=%(threads)s \\ maximum-requests=%(maximum_requests)s \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ listen-backlog=%(daemon_backlog)s \\ queue-timeout=%(queue_timeout)s \\ socket-timeout=%(socket_timeout)s \\ connect-timeout=%(connect_timeout)s \\ request-timeout=%(request_timeout)s \\ inactivity-timeout=%(inactivity_timeout)s \\ startup-timeout=%(startup_timeout)s \\ deadlock-timeout=%(deadlock_timeout)s \\ graceful-timeout=%(graceful_timeout)s \\ eviction-timeout=%(eviction_timeout)s \\ restart-interval=%(restart_interval)s \\ cpu-time-limit=%(cpu_time_limit)s \\ shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ header-buffer-size=%(header_buffer_size)s \\ response-buffer-size=%(response_buffer_size)s \\ response-socket-timeout=%(response_socket_timeout)s \\ server-metrics=%(server_metrics_flag)s WSGIDaemonProcess %(host)s:%(port)s \\ display-name='%(daemon_name)s' \\ home='%(working_directory)s' \\ threads=%(threads)s \\ maximum-requests=%(maximum_requests)s \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ listen-backlog=%(daemon_backlog)s \\ queue-timeout=%(queue_timeout)s \\ socket-timeout=%(socket_timeout)s \\ connect-timeout=%(connect_timeout)s \\ request-timeout=%(request_timeout)s \\ inactivity-timeout=%(inactivity_timeout)s \\ startup-timeout=%(startup_timeout)s \\ 
deadlock-timeout=%(deadlock_timeout)s \\ graceful-timeout=%(graceful_timeout)s \\ eviction-timeout=%(eviction_timeout)s \\ restart-interval=%(restart_interval)s \\ cpu-time-limit=%(cpu_time_limit)s \\ shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ response-buffer-size=%(response_buffer_size)s \\ response-socket-timeout=%(response_socket_timeout)s \\ server-metrics=%(server_metrics_flag)s WSGICallableObject '%(callable_object)s' WSGIPassAuthorization On WSGIMapHEADToGET %(map_head_to_get)s WSGIScriptReloading Off WSGIPythonPath '%(python_path)s' WSGIRestrictStdin Off WSGIPythonPath '%(python_path)s' ExtendedStatus On WSGIServerMetrics %(server_metrics_flag)s SetHandler server-status Order deny,allow Deny from all Allow from localhost = 2.4> Require all denied Require host localhost KeepAlive On KeepAliveTimeout %(keep_alive_timeout)s KeepAlive Off EnableSendfile On WSGIEnableSendfile On AddOutputFilterByType DEFLATE text/plain AddOutputFilterByType DEFLATE text/html AddOutputFilterByType DEFLATE text/xml AddOutputFilterByType DEFLATE text/css AddOutputFilterByType DEFLATE text/javascript AddOutputFilterByType DEFLATE application/xhtml+xml AddOutputFilterByType DEFLATE application/javascript AddOutputFilterByType DEFLATE application/json ErrorLog "|%(rotatelogs_executable)s \\ %(error_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" ErrorLog "%(error_log_file)s" LogLevel %(log_level)s ErrorLogFormat "%(error_log_format)s" LoadModule log_config_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_log_config.so LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b" common LogFormat "%%h %%l %%u %%t \\"%%r\\" %%>s %%b \\"%%{Referer}i\\" \\"%%{User-agent}i\\"" combined LogFormat "%(access_log_format)s" custom CustomLog "|%(rotatelogs_executable)s \\ %(access_log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" %(log_format_nickname)s CustomLog "%(access_log_file)s" %(log_format_nickname)s WSGIChunkedRequest On WSGITrustedProxyHeaders %(trusted_proxy_headers)s WSGITrustedProxies %(trusted_proxies)s LoadModule ssl_module ${MOD_WSGI_MODULES_DIRECTORY}/mod_ssl.so ServerLimit %(prefork_server_limit)s StartServers %(prefork_start_servers)s MaxClients %(prefork_max_clients)s MinSpareServers %(prefork_min_spare_servers)s MaxSpareServers %(prefork_max_spare_servers)s ServerLimit 1 StartServers 1 MaxClients 1 MinSpareServers 1 MaxSpareServers 1 MaxRequestsPerChild 0 ServerLimit %(worker_server_limit)s ThreadLimit %(worker_thread_limit)s StartServers %(worker_start_servers)s MaxClients %(worker_max_clients)s MinSpareThreads %(worker_min_spare_threads)s MaxSpareThreads %(worker_max_spare_threads)s ThreadsPerChild %(worker_threads_per_child)s ServerLimit 1 ThreadLimit 1 StartServers 1 MaxClients 1 MinSpareThreads 1 MaxSpareThreads 1 ThreadsPerChild 1 MaxRequestsPerChild 0 ThreadStackSize 262144 ServerLimit %(worker_server_limit)s ThreadLimit %(worker_thread_limit)s StartServers %(worker_start_servers)s MaxClients %(worker_max_clients)s MinSpareThreads %(worker_min_spare_threads)s MaxSpareThreads %(worker_max_spare_threads)s ThreadsPerChild %(worker_threads_per_child)s ServerLimit 1 ThreadLimit 1 StartServers 1 MaxClients 1 MinSpareThreads 1 MaxSpareThreads 1 ThreadsPerChild 1 MaxRequestsPerChild 0 ThreadStackSize 262144 NameVirtualHost *:%(port)s NameVirtualHost *:%(port)s Order deny,allow Deny from all = 2.4> Require all denied Allow from localhost ServerName %(server_name)s ServerAlias %(server_aliases)s ServerName %(parent_domain)s 
Redirect permanent / http://%(server_name)s:%(port)s/ ServerName %(server_name)s ServerAlias %(server_aliases)s RewriteEngine On RewriteCond %%{HTTPS} off RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI} ServerName %(parent_domain)s RewriteEngine On RewriteCond %%{HTTPS} off RewriteRule (.*) https://%(server_name)s:%(https_port)s%%{REQUEST_URI} Listen %(host)s:%(https_port)s Listen %(https_port)s NameVirtualHost *:%(https_port)s Order deny,allow Deny from all = 2.4> Require all denied Allow from localhost SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none SSLCertificateChainFile %(ssl_certificate_chain_file)s ServerName %(server_name)s ServerAlias %(server_aliases)s SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none SSLCertificateChainFile %(ssl_certificate_chain_file)s Header set Strict-Transport-Security %(hsts_policy)s SSLOptions +StdEnvVars ServerName %(parent_domain)s Redirect permanent / https://%(server_name)s:%(https_port)s/ SSLEngine On SSLCertificateFile %(ssl_certificate_file)s SSLCertificateKeyFile %(ssl_certificate_key_file)s SSLCACertificateFile %(ssl_ca_certificate_file)s SSLVerifyClient none SSLCertificateChainFile %(ssl_certificate_chain_file)s DocumentRoot '%(document_root)s' AccessFileName .htaccess AllowOverride %(allow_override)s Order allow,deny Allow from all = 2.4> Require all granted AllowOverride %(allow_override)s DirectoryIndex %(directory_index)s Options +Indexes Options +ExecCGI Options +ExecCGI RewriteEngine On Include %(rewrite_rules)s Order allow,deny Allow from all = 2.4> Require all granted RewriteCond %%{REQUEST_FILENAME} !-f RewriteCond %%{REQUEST_FILENAME} !-d RewriteCond %%{REQUEST_URI} !/server-status RewriteRule .* - [H=wsgi-handler] WSGIErrorOverride On WSGIAccessScript '%(host_access_script)s' AuthType %(auth_type)s AuthName '%(host)s:%(port)s' Auth%(auth_type)sProvider wsgi WSGIAuthUserScript '%(auth_user_script)s' WSGIAuthGroupScript '%(auth_group_script)s' Require valid-user Require wsgi-group '%(auth_group)s' = 2.4> Require valid-user Require wsgi-group '%(auth_group)s' WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ process-group='%%{GLOBAL}' application-group=%%{GLOBAL} WSGIHandlerScript wsgi-handler '%(server_root)s/handler.wsgi' \\ application-group=%%{GLOBAL} WSGIImportScript '%(server_root)s/handler.wsgi' \\ application-group=%%{GLOBAL} """ APACHE_IGNORE_ACTIVITY_CONFIG = """ WSGIIgnoreActivity On """ APACHE_PROXY_PASS_MOUNT_POINT_CONFIG = """ ProxyPass '%(mount_point)s' '%(url)s' ProxyPassReverse '%(mount_point)s' '%(url)s' RewriteEngine On RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE] RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e RewriteCond %%{HTTPS} on RewriteRule .* - 
[E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME """ APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG = """ ProxyPass '%(mount_point)s/' '%(url)s/' ProxyPassReverse '%(mount_point)s/' '%(url)s/' RewriteEngine On RewriteRule .* - [E=SERVER_PORT:%%{SERVER_PORT},NE] RequestHeader set X-Forwarded-Port %%{SERVER_PORT}e RewriteCond %%{HTTPS} on RewriteRule .* - [E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME RewriteEngine On RewriteRule - http://%%{HTTP_HOST}%%{REQUEST_URI}/ [R=302,L] """ APACHE_PROXY_PASS_HOST_CONFIG = """ ServerName %(host)s ProxyPass / '%(url)s' ProxyPassReverse / '%(url)s' RequestHeader set X-Forwarded-Port %(port)s RewriteEngine On RewriteCond %%{HTTPS} on RewriteRule .* - [E=URL_SCHEME:https,NE] RequestHeader set X-Forwarded-Scheme %%{URL_SCHEME}e env=URL_SCHEME """ APACHE_ALIAS_DIRECTORY_CONFIG = """ Alias '%(mount_point)s' '%(directory)s' AllowOverride %(allow_override)s Order allow,deny Allow from all = 2.4> Require all granted """ APACHE_ALIAS_FILENAME_CONFIG = """ Alias '%(mount_point)s' '%(directory)s/%(filename)s' Order allow,deny Allow from all = 2.4> Require all granted """ APACHE_ALIAS_DOCUMENTATION = """ Alias /__wsgi__/docs '%(documentation_directory)s' Alias /__wsgi__/images '%(images_directory)s' DirectoryIndex index.html Order allow,deny Allow from all = 2.4> Require all granted Order allow,deny Allow from all = 2.4> Require all granted """ APACHE_VERIFY_CLIENT_CONFIG = """ SSLVerifyClient require SSLVerifyDepth 1 """ APACHE_ERROR_DOCUMENT_CONFIG = """ ErrorDocument '%(status)s' '%(document)s' """ APACHE_SETENV_CONFIG = """ SetEnv '%(name)s' '%(value)s' """ APACHE_PASSENV_CONFIG = """ PassEnv '%(name)s' """ APACHE_HANDLER_SCRIPT_CONFIG = """ WSGIHandlerScript wsgi-resource '%(server_root)s/resource.wsgi' \\ process-group='%(host)s:%(port)s' application-group=%%{GLOBAL} """ APACHE_HANDLER_CONFIG = """ AddHandler %(handler)s %(extension)s """ APACHE_INCLUDE_CONFIG = """ Include '%(filename)s' """ APACHE_TOOLS_CONFIG = """ WSGIDaemonProcess express display-name=%%{GROUP} threads=1 server-metrics=On """ APACHE_METRICS_CONFIG = """ WSGIImportScript '%(server_root)s/server-metrics.py' \\ process-group=express application-group=server-metrics """ APACHE_SERVICE_CONFIG = """ WSGIDaemonProcess 'service:%(name)s' \\ display-name=%%{GROUP} \\ user='%(user)s' \\ group='%(group)s' \\ home='%(working_directory)s' \\ threads=0 \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ server-metrics=%(server_metrics_flag)s WSGIImportScript '%(script)s' \\ process-group='service:%(name)s' \\ application-group=%%{GLOBAL} """ APACHE_SERVICE_WITH_LOG_CONFIG = """ ErrorLog "|%(rotatelogs_executable)s \\ %(log_directory)s/%(log_file)s.%%Y-%%m-%%d-%%H_%%M_%%S %(max_log_size)sM" ErrorLog "%(log_directory)s/%(log_file)s" WSGIDaemonProcess 'service:%(name)s' \\ display-name=%%{GROUP} \\ user='%(user)s' \\ group='%(group)s' \\ home='%(working_directory)s' \\ threads=0 \\ python-path='%(python_path)s' \\ python-eggs='%(python_eggs)s' \\ lang='%(lang)s' \\ locale='%(locale)s' \\ server-metrics=%(server_metrics_flag)s WSGIImportScript '%(script)s' \\ process-group='service:%(name)s' \\ application-group=%%{GLOBAL} """ def generate_apache_config(options): with open(options['httpd_conf'], 'w') as fp: print(APACHE_GENERAL_CONFIG % options, file=fp) if options['ignore_activity']: for url in options['ignore_activity']: print(APACHE_IGNORE_ACTIVITY_CONFIG % 
dict(url=url), file=fp) if options['proxy_mount_points']: for mount_point, url in options['proxy_mount_points']: if mount_point.endswith('/'): print(APACHE_PROXY_PASS_MOUNT_POINT_CONFIG % dict( mount_point=mount_point, url=url), file=fp) else: print(APACHE_PROXY_PASS_MOUNT_POINT_SLASH_CONFIG % dict( mount_point=mount_point, url=url), file=fp) if options['proxy_virtual_hosts']: for host, url in options['proxy_virtual_hosts']: print(APACHE_PROXY_PASS_HOST_CONFIG % dict( host=host, port=options['port'], url=url), file=fp) if options['url_aliases']: for mount_point, target in sorted(options['url_aliases'], reverse=True): path = posixpath.abspath(target) if os.path.isdir(path) or not os.path.exists(path): if target.endswith('/') and path != '/': directory = path + '/' else: directory = path print(APACHE_ALIAS_DIRECTORY_CONFIG % dict( mount_point=mount_point, directory=directory, allow_override=options['allow_override']), file=fp) else: directory = posixpath.dirname(path) filename = posixpath.basename(path) print(APACHE_ALIAS_FILENAME_CONFIG % dict( mount_point=mount_point, directory=directory, filename=filename), file=fp) if options['enable_docs']: print(APACHE_ALIAS_DOCUMENTATION % options, file=fp) if options['error_documents']: for status, document in options['error_documents']: print(APACHE_ERROR_DOCUMENT_CONFIG % dict(status=status, document=document.replace("'", "\\'")), file=fp) if options['ssl_verify_client_urls']: paths = sorted(options['ssl_verify_client_urls'], reverse=True) for path in paths: print(APACHE_VERIFY_CLIENT_CONFIG % dict(path=path), file=fp) else: print(APACHE_VERIFY_CLIENT_CONFIG % dict(path='/'), file=fp) if options['setenv_variables']: for name, value in options['setenv_variables']: print(APACHE_SETENV_CONFIG % dict(name=name, value=value), file=fp) if options['passenv_variables']: for name in options['passenv_variables']: print(APACHE_PASSENV_CONFIG % dict(name=name), file=fp) if options['handler_scripts']: print(APACHE_HANDLER_SCRIPT_CONFIG % options, file=fp) for extension, script in options['handler_scripts']: print(APACHE_HANDLER_CONFIG % dict(handler='wsgi-resource', extension=extension), file=fp) if options['with_cgi']: print(APACHE_HANDLER_CONFIG % dict(handler='cgi-script', extension='.cgi'), file=fp) if options['service_scripts']: service_log_files = {} if options['service_log_files']: service_log_files.update(options['service_log_files']) users = dict(options['service_users'] or []) groups = dict(options['service_groups'] or []) for name, script in options['service_scripts']: user = users.get(name, '${MOD_WSGI_USER}') group = groups.get(name, '${MOD_WSGI_GROUP}') if name in service_log_files: print(APACHE_SERVICE_WITH_LOG_CONFIG % dict(name=name, user=user, group=group, script=script, port=options['port'], log_directory=options['log_directory'], log_file=service_log_files[name], rotatelogs_executable=options['rotatelogs_executable'], max_log_size=options['max_log_size'], python_path=options['python_path'], working_directory=options['working_directory'], python_eggs=options['python_eggs'], lang=options['lang'], locale=options['locale'], server_metrics_flag=options['server_metrics_flag']), file=fp) else: print(APACHE_SERVICE_CONFIG % dict(name=name, user=user, group=group, script=script, python_path=options['python_path'], working_directory=options['working_directory'], python_eggs=options['python_eggs'], lang=options['lang'], locale=options['locale'], server_metrics_flag=options['server_metrics_flag']), file=fp) if options['include_files']: for filename in 
options['include_files']: filename = posixpath.abspath(filename) print(APACHE_INCLUDE_CONFIG % dict(filename=filename), file=fp) if options['with_newrelic_platform']: print(APACHE_TOOLS_CONFIG % options, file=fp) if options['with_newrelic_platform']: print(APACHE_METRICS_CONFIG % options, file=fp) _interval = 1.0 _times = {} _files = [] _running = False _queue = queue.Queue() _lock = threading.Lock() def _restart(path): _queue.put(True) prefix = 'monitor (pid=%d):' % os.getpid() print('%s Change detected to "%s".' % (prefix, path), file=sys.stderr) print('%s Triggering process restart.' % prefix, file=sys.stderr) os.kill(os.getpid(), signal.SIGINT) def _modified(path): try: # If path doesn't denote a file and were previously # tracking it, then it has been removed or the file type # has changed so force a restart. If not previously # tracking the file then we can ignore it as probably # pseudo reference such as when file extracted from a # collection of modules contained in a zip file. if not os.path.isfile(path): return path in _times # Check for when file last modified. mtime = os.stat(path).st_mtime if path not in _times: _times[path] = mtime # Force restart when modification time has changed, even # if time now older, as that could indicate older file # has been restored. if mtime != _times[path]: return True except Exception: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. return True return False def _monitor(): global _files while True: # Check modification times on all files in sys.modules. for module in list(sys.modules.values()): if not hasattr(module, '__file__'): continue path = getattr(module, '__file__') if not path: continue if os.path.splitext(path)[1] in ['.pyc', '.pyo', '.pyd']: path = path[:-1] if _modified(path): return _restart(path) # Check modification times on files which have # specifically been registered for monitoring. for path in _files: if _modified(path): return _restart(path) # Go to sleep for specified interval. try: return _queue.get(timeout=_interval) except queue.Empty: pass _thread = threading.Thread(target=_monitor) _thread.setDaemon(True) def _exiting(): try: _queue.put(True) except Exception: pass _thread.join() def track_changes(path): if not path in _files: _files.append(path) def start_reloader(interval=1.0): global _interval if interval < _interval: _interval = interval global _running _lock.acquire() if not _running: prefix = 'monitor (pid=%d):' % os.getpid() print('%s Starting change monitor.' 
% prefix, file=sys.stderr) _running = True _thread.start() atexit.register(_exiting) _lock.release() class PostMortemDebugger(object): def __init__(self, application, startup): self.application = application self.generator = None import pdb self.debugger = pdb.Pdb() if startup: self.activate_console() def activate_console(self): self.debugger.set_trace(sys._getframe().f_back) def run_post_mortem(self): self.debugger.reset() self.debugger.interaction(None, sys.exc_info()[2]) def __call__(self, environ, start_response): try: self.generator = self.application(environ, start_response) return self except Exception: self.run_post_mortem() raise def __iter__(self): try: for item in self.generator: yield item except Exception: self.run_post_mortem() raise def close(self): try: if hasattr(self.generator, 'close'): return self.generator.close() except Exception: self.run_post_mortem() raise class RequestRecorder(object): def __init__(self, application, savedir): self.application = application self.savedir = savedir self.lock = threading.Lock() self.pid = os.getpid() self.count = 0 def __call__(self, environ, start_response): with self.lock: self.count += 1 count = self.count key = "%s-%s-%s" % (int(time.time()*1000000), self.pid, count) iheaders = os.path.join(self.savedir, key + ".iheaders") iheaders_fp = open(iheaders, 'w') icontent = os.path.join(self.savedir, key + ".icontent") icontent_fp = open(icontent, 'w+b') oheaders = os.path.join(self.savedir, key + ".oheaders") oheaders_fp = open(oheaders, 'w') ocontent = os.path.join(self.savedir, key + ".ocontent") ocontent_fp = open(ocontent, 'w+b') oaexcept = os.path.join(self.savedir, key + ".oaexcept") oaexcept_fp = open(oaexcept, 'w') orexcept = os.path.join(self.savedir, key + ".orexcept") orexcept_fp = open(orexcept, 'w') ofexcept = os.path.join(self.savedir, key + ".ofexcept") ofexcept_fp = open(ofexcept, 'w') errors = environ['wsgi.errors'] pprint.pprint(environ, stream=iheaders_fp) iheaders_fp.close() input = environ['wsgi.input'] data = input.read(8192) while data: icontent_fp.write(data) data = input.read(8192) icontent_fp.flush() icontent_fp.seek(0, os.SEEK_SET) environ['wsgi.input'] = icontent_fp def _start_response(status, response_headers, *args): pprint.pprint(((status, response_headers)+args), stream=oheaders_fp) _write = start_response(status, response_headers, *args) def write(self, data): ocontent_fp.write(data) ocontent_fp.flush() return _write(data) return write try: try: result = self.application(environ, _start_response) except: traceback.print_exception(*sys.exc_info(), file=oaexcept_fp) raise try: for data in result: ocontent_fp.write(data) ocontent_fp.flush() yield data except: traceback.print_exception(*sys.exc_info(), file=orexcept_fp) raise finally: try: if hasattr(result, 'close'): result.close() except: traceback.print_exception(*sys.exc_info(), file=ofexcept_fp) raise finally: oheaders_fp.close() ocontent_fp.close() oaexcept_fp.close() orexcept_fp.close() ofexcept_fp.close() class ApplicationHandler(object): def __init__(self, entry_point, application_type='script', callable_object='application', mount_point='/', with_newrelic_agent=False, debug_mode=False, enable_debugger=False, debugger_startup=False, enable_recorder=False, recorder_directory=None): self.entry_point = entry_point self.application_type = application_type self.callable_object = callable_object self.mount_point = mount_point if application_type == 'module': __import__(entry_point) self.module = sys.modules[entry_point] self.application = 
getattr(self.module, callable_object) self.target = self.module.__file__ parts = os.path.splitext(self.target)[-1] if parts[-1].lower() in ('.pyc', '.pyd', '.pyd'): self.target = parts[0] + '.py' elif application_type == 'paste': from paste.deploy import loadapp self.application = loadapp('config:%s' % entry_point) self.target = entry_point elif application_type != 'static': self.module = types.ModuleType('__wsgi__') self.module.__file__ = entry_point with open(entry_point, 'r') as fp: code = compile(fp.read(), entry_point, 'exec', dont_inherit=True) exec(code, self.module.__dict__) sys.modules['__wsgi__'] = self.module self.application = getattr(self.module, callable_object) self.target = entry_point try: self.mtime = os.path.getmtime(self.target) except Exception: self.mtime = None if with_newrelic_agent: self.setup_newrelic_agent() self.debug_mode = debug_mode self.enable_debugger = enable_debugger if enable_debugger: self.setup_debugger(debugger_startup) if enable_recorder: self.setup_recorder(recorder_directory) def setup_newrelic_agent(self): import newrelic.agent config_file = os.environ.get('NEW_RELIC_CONFIG_FILE') environment = os.environ.get('NEW_RELIC_ENVIRONMENT') global_settings = newrelic.agent.global_settings() if global_settings.log_file is None: global_settings.log_file = 'stderr' newrelic.agent.initialize(config_file, environment) newrelic.agent.register_application() self.application = newrelic.agent.WSGIApplicationWrapper( self.application) def setup_debugger(self, startup): self.application = PostMortemDebugger(self.application, startup) def setup_recorder(self, savedir): self.application = RequestRecorder(self.application, savedir) def reload_required(self, resource): if self.debug_mode: return False try: mtime = os.path.getmtime(self.target) except Exception: mtime = None return mtime != self.mtime def handle_request(self, environ, start_response): # Strip out the leading component due to internal redirect in # Apache when using web application as fallback resource. mount_point = environ.get('mod_wsgi.mount_point') script_name = environ.get('SCRIPT_NAME') path_info = environ.get('PATH_INFO') if mount_point is not None: # If this is set then it means that SCRIPT_NAME was # overridden by a trusted proxy header. In this case # we want to ignore any local mount point, simply # stripping it from the path. 
script_name = environ['mod_wsgi.script_name'] environ['PATH_INFO'] = script_name + path_info if self.mount_point != '/': if environ['PATH_INFO'].startswith(self.mount_point): environ['PATH_INFO'] = environ['PATH_INFO'][len( self.mount_point):] else: environ['SCRIPT_NAME'] = '' environ['PATH_INFO'] = script_name + path_info if self.mount_point != '/': if environ['PATH_INFO'].startswith(self.mount_point): environ['SCRIPT_NAME'] = self.mount_point environ['PATH_INFO'] = environ['PATH_INFO'][len( self.mount_point):] return self.application(environ, start_response) def __call__(self, environ, start_response): return self.handle_request(environ, start_response) class ResourceHandler(object): def __init__(self, resources): self.resources = {} for extension, script in resources: extension_name = re.sub(r'[^\w]{1}', '_', extension) module_name = '__wsgi_resource%s__' % extension_name module = types.ModuleType(module_name) module.__file__ = script with open(script, 'r') as fp: code = compile(fp.read(), script, 'exec', dont_inherit=True) exec(code, module.__dict__) sys.modules[module_name] = module self.resources[extension] = module def resource_extension(self, resource): return os.path.splitext(resource)[-1] def reload_required(self, resource): extension = self.resource_extension(resource) function = getattr(self.resources[extension], 'reload_required', None) if function is not None: return function(resource) return False def handle_request(self, environ, start_response): resource = environ['SCRIPT_NAME'] extension = self.resource_extension(resource) module = self.resources[extension] function = getattr(module, 'handle_request', None) if function is not None: return function(environ, start_response) function = getattr(module, 'application') return function(environ, start_response) def __call__(self, environ, start_response): return self.handle_request(environ, start_response) WSGI_HANDLER_SCRIPT = """ import os import sys import atexit import time import mod_wsgi.server working_directory = r'%(working_directory)s' entry_point = r'%(entry_point)s' application_type = '%(application_type)s' callable_object = '%(callable_object)s' mount_point = '%(mount_point)s' with_newrelic_agent = %(with_newrelic_agent)s newrelic_config_file = '%(newrelic_config_file)s' newrelic_environment = '%(newrelic_environment)s' disable_reloading = %(disable_reloading)s reload_on_changes = %(reload_on_changes)s debug_mode = %(debug_mode)s enable_debugger = %(enable_debugger)s debugger_startup = %(debugger_startup)s enable_coverage = %(enable_coverage)s coverage_directory = '%(coverage_directory)s' enable_profiler = %(enable_profiler)s profiler_directory = '%(profiler_directory)s' enable_recorder = %(enable_recorder)s recorder_directory = '%(recorder_directory)s' enable_gdb = %(enable_gdb)s os.environ['MOD_WSGI_EXPRESS'] = 'true' os.environ['MOD_WSGI_SERVER_NAME'] = '%(server_host)s' os.environ['MOD_WSGI_SERVER_ALIASES'] = %(server_aliases)r or '' if reload_on_changes: os.environ['MOD_WSGI_RELOADER_ENABLED'] = 'true' if debug_mode: os.environ['MOD_WSGI_DEBUG_MODE'] = 'true' # We need to fiddle sys.path as we are not using daemon mode and so # the working directory will not be added to sys.path by virtue of # 'home' option to WSGIDaemonProcess directive. We could use the # WSGIPythonPath directive, but that will cause .pth files to also # be evaluated. 
sys.path.insert(0, working_directory) if enable_debugger: os.environ['MOD_WSGI_DEBUGGER_ENABLED'] = 'true' def output_coverage_report(): coverage_info.stop() coverage_info.html_report(directory=coverage_directory) if enable_coverage: os.environ['MOD_WSGI_COVERAGE_ENABLED'] = 'true' from coverage import coverage coverage_info = coverage() coverage_info.start() atexit.register(output_coverage_report) def output_profiler_data(): profiler_info.disable() output_file = '%%s-%%d.pstats' %% (int(time.time()*1000000), os.getpid()) output_file = os.path.join(profiler_directory, output_file) profiler_info.dump_stats(output_file) if enable_profiler: os.environ['MOD_WSGI_PROFILER_ENABLED'] = 'true' from cProfile import Profile profiler_info = Profile() profiler_info.enable() atexit.register(output_profiler_data) if enable_recorder: os.environ['MOD_WSGI_RECORDER_ENABLED'] = 'true' if enable_gdb: os.environ['MOD_WSGI_GDB_ENABLED'] = 'true' if with_newrelic_agent: if newrelic_config_file: os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file if newrelic_environment: os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment handler = mod_wsgi.server.ApplicationHandler(entry_point, application_type=application_type, callable_object=callable_object, mount_point=mount_point, with_newrelic_agent=with_newrelic_agent, debug_mode=debug_mode, enable_debugger=enable_debugger, debugger_startup=debugger_startup, enable_recorder=enable_recorder, recorder_directory=recorder_directory) if not disable_reloading: reload_required = handler.reload_required handle_request = handler.handle_request if not disable_reloading and reload_on_changes and not debug_mode: mod_wsgi.server.start_reloader() """ WSGI_RESOURCE_SCRIPT = """ import mod_wsgi.server resources = %(resources)s handler = mod_wsgi.server.ResourceHandler(resources) reload_required = handler.reload_required handle_request = handler.handle_request """ WSGI_DEFAULT_SCRIPT = """ CONTENT = b''' My web site runs on Malt Whiskey
My web site
runs on
Malt Whiskey

For further information on configuring mod_wsgi,
see the documentation.
''' def application(environ, start_response): status = '200 OK' output = CONTENT response_headers = [('Content-type', 'text/html'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] """ def generate_wsgi_handler_script(options): path = os.path.join(options['server_root'], 'handler.wsgi') with open(path, 'w') as fp: print(WSGI_HANDLER_SCRIPT % options, file=fp) path = os.path.join(options['server_root'], 'resource.wsgi') with open(path, 'w') as fp: print(WSGI_RESOURCE_SCRIPT % dict(resources=repr( options['handler_scripts'])), file=fp) path = os.path.join(options['server_root'], 'default.wsgi') with open(path, 'w') as fp: print(WSGI_DEFAULT_SCRIPT % options, file=fp) SERVER_METRICS_SCRIPT = """ import os import logging newrelic_config_file = '%(newrelic_config_file)s' newrelic_environment = '%(newrelic_environment)s' with_newrelic_platform = %(with_newrelic_platform)s if with_newrelic_platform: if newrelic_config_file: os.environ['NEW_RELIC_CONFIG_FILE'] = newrelic_config_file if newrelic_environment: os.environ['NEW_RELIC_ENVIRONMENT'] = newrelic_environment logging.basicConfig(level=logging.INFO, format='%%(name)s (pid=%%(process)d, level=%%(levelname)s): %%(message)s') _logger = logging.getLogger(__name__) try: from mod_wsgi.metrics.newrelic import Agent agent = Agent() agent.start() except ImportError: _logger.fatal('The module mod_wsgi.metrics.newrelic is not available. ' 'The New Relic platform plugin has been disabled. Install the ' '"mod_wsgi-metrics" package.') """ def generate_server_metrics_script(options): path = os.path.join(options['server_root'], 'server-metrics.py') with open(path, 'w') as fp: print(SERVER_METRICS_SCRIPT % options, file=fp) WSGI_CONTROL_SCRIPT = """ #!%(shell_executable)s # %(sys_argv)s HTTPD="%(httpd_executable)s" HTTPD_ARGS="%(httpd_arguments)s" HTTPD_COMMAND="$HTTPD $HTTPD_ARGS" MOD_WSGI_MODULES_DIRECTORY="%(modules_directory)s" export MOD_WSGI_MODULES_DIRECTORY SHLIBPATH="%(shlibpath)s" if [ "x$SHLIBPATH" != "x" ]; then %(shlibpath_var)s="$SHLIBPATH:$%(shlibpath_var)s" export %(shlibpath_var)s fi MOD_WSGI_SERVER_ROOT="%(server_root)s" export MOD_WSGI_SERVER_ROOT MOD_WSGI_LISTENER_HOST="%(host)s" export MOD_WSGI_LISTENER_HOST MOD_WSGI_HTTP_PORT="%(port)s" MOD_WSGI_HTTPS_PORT="%(https_port)s" export MOD_WSGI_HTTP_PORT export MOD_WSGI_HTTPS_PORT WSGI_RUN_USER="${WSGI_RUN_USER:-%(user)s}" WSGI_RUN_GROUP="${WSGI_RUN_GROUP:-%(group)s}" MOD_WSGI_USER="${MOD_WSGI_USER:-${WSGI_RUN_USER}}" MOD_WSGI_GROUP="${MOD_WSGI_GROUP:-${WSGI_RUN_GROUP}}" export MOD_WSGI_USER export MOD_WSGI_GROUP if [ `id -u` = "0" -a ${MOD_WSGI_USER} = "root" ]; then cat << EOF WARNING: When running as the 'root' user, it is required that the options '--user' and '--group' be specified to mod_wsgi-express. These should define a non 'root' user and group under which the Apache child worker processes and mod_wsgi daemon processes should be run. Failure to specify these options will result in Apache and/or the mod_wsgi daemon processes failing to start. See the mod_wsgi-express documentation for further information on this restriction. EOF fi MOD_WSGI_WORKING_DIRECTORY="%(working_directory)s" export MOD_WSGI_WORKING_DIRECTORY LANG='%(lang)s' LC_ALL='%(locale)s' export LANG export LC_ALL ACMD="$1" ARGV="$@" if test -f %(server_root)s/envvars; then . 
%(server_root)s/envvars fi STATUSURL="http://%(host)s:%(port)s/server-status" if [ "x$ARGV" = "x" ]; then ARGV="-h" fi GDB="%(gdb_executable)s" ENABLE_GDB="%(enable_gdb)s" PROCESS_NAME="%(process_name)s" cd $MOD_WSGI_WORKING_DIRECTORY case $ACMD in start|stop|restart|graceful|graceful-stop) if [ "x$ENABLE_GDB" != "xTrue" ]; then exec -a "$PROCESS_NAME" $HTTPD_COMMAND -k $ARGV else echo "run $HTTPD_ARGS -k $ARGV" > %(server_root)s/gdb.cmds gdb -x %(server_root)s/gdb.cmds $HTTPD fi ;; configtest) exec $HTTPD_COMMAND -t ;; status) exec %(python_executable)s -m webbrowser -t $STATUSURL ;; *) exec $HTTPD_COMMAND $ARGV esac """ APACHE_ENVVARS_FILE = """ . %(envvars_script)s """ def generate_control_scripts(options): path = os.path.join(options['server_root'], 'apachectl') with open(path, 'w') as fp: print(WSGI_CONTROL_SCRIPT.lstrip() % options, file=fp) os.chmod(path, 0o755) path = os.path.join(options['server_root'], 'envvars') if options['envvars_script']: with open(path, 'w') as fp: if options['envvars_script']: print(APACHE_ENVVARS_FILE.lstrip() % options, file=fp) elif not os.path.isfile(path): with open(path, 'w') as fp: pass def check_percentage(option, opt_str, value, parser): if value is not None and value < 0 or value > 1: raise optparse.OptionValueError('%s option value needs to be within ' 'the range 0 to 1.' % opt_str) setattr(parser.values, option.dest, value) option_list = [] def add_option(platforms, *args, **kwargs): targets = platforms.split('|') suppress = False if os.name == 'nt': if 'all' not in targets and 'windows' not in targets: suppress = True else: if 'all' not in targets and 'unix' not in targets: suppress = True if suppress: kwargs['help'] = optparse.SUPPRESS_HELP if 'hidden' in targets: kwargs['help'] = optparse.SUPPRESS_HELP option_list.append(optparse.make_option(*args, **kwargs)) add_option('all', '--application-type', default='script', metavar='TYPE', help='The type of WSGI application entry point ' 'that was provided. Defaults to \'script\', indicating the ' 'traditional mod_wsgi style WSGI script file specified by a ' 'filesystem path. Alternatively one can supply \'module\', ' 'indicating that the provided entry point is a Python module ' 'which should be imported using the standard Python import ' 'mechanism, or \'paste\' indicating that the provided entry ' 'point is a Paste deployment configuration file. If you want ' 'to just use the server to host static files only, then you ' 'can also instead supply \'static\' with the target being ' 'the directory containing the files to server or the current ' 'directory if none is supplied.') add_option('all', '--entry-point', default=None, metavar='FILE-PATH|MODULE', help='The file system path or ' 'module name identifying the file which contains the WSGI ' 'application entry point. How the value given is interpreted ' 'depends on the corresponding type identified using the ' '\'--application-type\' option. Use of this option is the ' 'same as if the value had been given as argument but without ' 'any option specifier. A named option is also provided so ' 'as to make it clearer in a long option list what the entry ' 'point actually is. If both methods are used, that specified ' 'by this option will take precedence.') add_option('all', '--host', default=None, metavar='IP-ADDRESS', help='The specific host (IP address) interface on which ' 'requests are to be accepted. 
Defaults to listening on ' 'all host interfaces.') add_option('all', '--port', default=8000, type='int', metavar='NUMBER', help='The specific port to bind to and ' 'on which requests are to be accepted. Defaults to port 8000.') add_option('all', '--http2', action='store_true', default=False, help='Flag indicating whether HTTP/2 should be enabled.' 'Requires the mod_http2 module to be available.') add_option('all', '--https-port', type='int', metavar='NUMBER', help='The specific port to bind to and on which secure ' 'requests are to be accepted.') add_option('all', '--ssl-port', type='int', metavar='NUMBER', dest='https_port', help=optparse.SUPPRESS_HELP) add_option('all', '--ssl-certificate-file', default=None, metavar='FILE-PATH', help='Specify the path to the SSL ' 'certificate file.') add_option('all', '--ssl-certificate-key-file', default=None, metavar='FILE-PATH', help='Specify the path to the private ' 'key file corresponding to the SSL certificate file.') add_option('all', '--ssl-certificate', default=None, metavar='FILE-PATH', help='Specify the common path to the SSL ' 'certificate files. This is a convenience function so that ' 'only one option is required to specify the location of the ' 'certificate file and the private key file. It is expected that ' 'the files have \'.crt\' and \'.key\' extensions. This option ' 'should refer to the common part of the names for both files ' 'which appears before the extension.') add_option('all', '--ssl-ca-certificate-file', default=None, metavar='FILE-PATH', help='Specify the path to the file with ' 'the CA certificates to be used for client authentication. When ' 'specified, access to the whole site will by default require ' 'client authentication. To require client authentication for ' 'only parts of the site, use the --ssl-verify-client option.') add_option('all', '--ssl-verify-client', action='append', metavar='URL-PATH', dest='ssl_verify_client_urls', help='Specify a sub URL of the site for which client ' 'authentication is required. When this option is specified, ' 'the default of client authentication being required for the ' 'whole site will be disabled and verification will only be ' 'required for the specified sub URL.') add_option('all', '--ssl-certificate-chain-file', default=None, metavar='FILE-PATH', help='Specify the path to a file ' 'containing the certificates of Certification Authorities (CA) ' 'which form the certificate chain of the server certificate.') add_option('all', '--ssl-environment', action='store_true', default=False, help='Flag indicating whether the standard set ' 'of SSL related variables are passed in the per request ' 'environment passed to a handler.') add_option('all', '--https-only', action='store_true', default=False, help='Flag indicating whether any requests ' 'made using a HTTP request over the non secure connection ' 'should be redirected automatically to use a HTTPS request ' 'over the secure connection.') add_option('all', '--hsts-policy', default=None, metavar='PARAMS', help='Specify the HSTS policy that should be applied when ' 'HTTPS only connections are being enforced.') add_option('all', '--server-name', default=None, metavar='HOSTNAME', help='The primary host name of the web server. If this name ' 'starts with \'www.\' then an automatic redirection from the ' 'parent domain name to the \'www.\' server name will created.') add_option('all', '--server-alias', action='append', dest='server_aliases', metavar='HOSTNAME', help='A secondary ' 'host name for the web server. 
May include wildcard patterns.') add_option('all', '--allow-localhost', action='store_true', default=False, help='Flag indicating whether access via ' 'localhost should still be allowed when a server name has been ' 'specified and a name based virtual host has been configured.') add_option('unix', '--processes', type='int', metavar='NUMBER', help='The number of worker processes (instances of the WSGI ' 'application) to be started up and which will handle requests ' 'concurrently. Defaults to a single process.') add_option('all', '--threads', type='int', default=5, metavar='NUMBER', help='The number of threads in the request thread pool of ' 'each process for handling requests. Defaults to 5 in each ' 'process. Note that if embedded mode and only prefork MPM ' 'is available, then processes will instead be used.') add_option('unix', '--max-clients', type='int', default=None, metavar='NUMBER', help='The maximum number of simultaneous ' 'client connections that will be accepted. This will default ' 'to being 1.5 times the total number of threads in the ' 'request thread pools across all process handling requests. ' 'Note that if embedded mode is used this will be ignored.') add_option('unix', '--initial-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The initial number of workers to create on startup ' 'expressed as a percentage of the maximum number of clients. ' 'The value provided should be between 0 and 1. The default is ' 'dependent on the type of MPM being used. Note that if ' 'embedded mode is used, this will be ignored.'), add_option('unix', '--minimum-spare-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The minimum number of spare ' 'workers to maintain expressed as a percentage of the maximum ' 'number of clients. The value provided should be between 0 and ' '1. The default is dependent on the type of MPM being used. ' 'Note that if embedded mode is used, this will be ignored.') add_option('unix', '--maximum-spare-workers', type='float', default=None, metavar='NUMBER', action='callback', callback=check_percentage, help='The maximum number of spare ' 'workers to maintain expressed as a percentage of the maximum ' 'number of clients. The value provided should be between 0 and ' '1. The default is dependent on the type of MPM being used. ' 'Note that if embedded mode is used, this will be ignored.') add_option('all', '--limit-request-body', type='int', default=10485760, metavar='NUMBER', help='The maximum number of bytes which are ' 'allowed in a request body. Defaults to 10485760 (10MB).') add_option('all', '--maximum-requests', type='int', default=0, metavar='NUMBER', help='The number of requests after which ' 'any one worker process will be restarted and the WSGI ' 'application reloaded. Defaults to 0, indicating that the ' 'worker process should never be restarted based on the number ' 'of requests received.') add_option('unix', '--startup-timeout', type='int', default=15, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass waiting for the application to be successfully ' 'loaded and started by a worker process. When this timeout ' 'has been reached without the application having been ' 'successfully loaded and started, the worker process will ' 'be forced to restart. 
Defaults to 15 seconds.') add_option('unix', '--shutdown-timeout', type='int', default=5, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass when waiting for a worker process to shutdown as a ' 'result of the maximum number of requests or inactivity timeout ' 'being reached, or when a user initiated SIGINT signal is sent ' 'to a worker process. When this timeout has been reached the ' 'worker process will be forced to exit even if there are ' 'still active requests or it is still running Python exit ' 'functions. Defaults to 5 seconds.') add_option('unix', '--restart-interval', type='int', default='0', metavar='SECONDS', help='Number of seconds between worker ' 'process restarts. If graceful timeout is also specified, ' 'active requests will be given a chance to complete before ' 'the process is forced to exit and restart. Not enabled by ' 'default.') add_option('unix', '--cpu-time-limit', type='int', default='0', metavar='SECONDS', help='Number of seconds of CPU time the ' 'process can use before it will be restarted. If graceful ' 'timeout is also specified, active requests will be given ' 'a chance to complete before the process is forced to exit ' 'and restart. Not enabled by default.') add_option('unix', '--graceful-timeout', type='int', default=15, metavar='SECONDS', help='Grace period for requests to complete ' 'normally, while still accepting new requests, when worker ' 'processes are being shutdown and restarted due to maximum ' 'requests being reached or restart interval having expired. ' 'Defaults to 15 seconds.') add_option('unix', '--eviction-timeout', type='int', default=0, metavar='SECONDS', help='Grace period for requests to complete ' 'normally, while still accepting new requests, when the WSGI ' 'application is being evicted from the worker processes, and ' 'the process restarted, due to forced graceful restart signal. ' 'Defaults to timeout specified by \'--graceful-timeout\' ' 'option.') add_option('unix', '--deadlock-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is forcibly shutdown and ' 'restarted after a potential deadlock on the Python GIL has ' 'been detected. Defaults to 60 seconds.') add_option('unix', '--inactivity-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is shutdown and restarted ' 'when the worker process has entered an idle state and is no ' 'longer receiving new requests. Not enabled by default.') add_option('unix', '--ignore-activity', action='append', dest='ignore_activity', metavar='URL-PATH', help='Specify ' 'the URL path for any location where activity should be ' 'ignored when the \'--activity-timeout\' option is used. ' 'This would be used on health check URLs so that health ' 'checks do not prevent process restarts due to inactivity.') add_option('unix', '--request-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before the worker process is forcibly shutdown and ' 'restarted when a request does not complete in the expected ' 'time. In a multi threaded worker, the request time is ' 'calculated as an average across all request threads. 
Defaults ' 'to 60 seconds.') add_option('unix', '--connect-timeout', type='int', default=15, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before giving up on attempting to get a connection ' 'to the worker process from the Apache child process which ' 'accepted the request. This comes into play when the worker ' 'listener backlog limit is exceeded. Defaults to 15 seconds.') add_option('all', '--socket-timeout', type='int', default=60, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before timing out on a read or write operation on ' 'a socket and aborting the request. Defaults to 60 seconds.') add_option('all', '--queue-timeout', type='int', default=45, metavar='SECONDS', help='Maximum number of seconds allowed ' 'for a request to be accepted by a worker process to be ' 'handled, taken from the time when the Apache child process ' 'originally accepted the request. Defaults to 45 seconds.') add_option('all', '--header-timeout', type='int', default=15, metavar='SECONDS', help='The number of seconds allowed for ' 'receiving the request including the headers. This may be ' 'dynamically increased if a minimum rate for reading the ' 'request and headers is also specified, up to any limit ' 'imposed by a maximum header timeout. Defaults to 15 seconds.') add_option('all', '--header-max-timeout', type='int', default=30, metavar='SECONDS', help='Maximum number of seconds allowed for ' 'receiving the request including the headers. This is the hard ' 'limit after taking into consideration and increases to the ' 'basic timeout due to minimum rate for reading the request and ' 'headers which may be specified. Defaults to 30 seconds.') add_option('all', '--header-min-rate', type='int', default=500, metavar='BYTES', help='The number of bytes required to be sent ' 'as part of the request and headers to trigger a dynamic ' 'increase in the timeout on receiving the request including ' 'headers. Each time this number of bytes is received the timeout ' 'will be increased by 1 second up to any maximum specified by ' 'the maximum header timeout. Defaults to 500 bytes.') add_option('all', '--body-timeout', type='int', default=15, metavar='SECONDS', help='The number of seconds allowed for ' 'receiving the request body. This may be dynamically increased ' 'if a minimum rate for reading the request body is also ' 'specified, up to any limit imposed by a maximum body timeout. ' 'Defaults to 15 seconds.') add_option('all', '--body-max-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed for ' 'receiving the request body. This is the hard limit after ' 'taking into consideration and increases to the basic timeout ' 'due to minimum rate for reading the request body which may be ' 'specified. Defaults to 0 indicating there is no maximum.') add_option('all', '--body-min-rate', type='int', default=500, metavar='BYTES', help='The number of bytes required to be sent ' 'as part of the request body to trigger a dynamic increase in ' 'the timeout on receiving the request body. Each time this ' 'number of bytes is received the timeout will be increased ' 'by 1 second up to any maximum specified by the maximum body ' 'timeout. Defaults to 500 bytes.') add_option('all', '--server-backlog', type='int', default=500, metavar='NUMBER', help='Depth of server socket listener ' 'backlog for Apache child processes. 
Defaults to 500.') add_option('unix', '--daemon-backlog', type='int', default=100, metavar='NUMBER', help='Depth of server socket listener ' 'backlog for daemon processes. Defaults to 100.') add_option('unix', '--send-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of socket buffer for sending ' 'data to daemon processes. Defaults to 0, indicating ' 'the system default socket buffer size is used.') add_option('unix', '--receive-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of socket buffer for receiving ' 'data from daemon processes. Defaults to 0, indicating ' 'the system default socket buffer size is used.') add_option('unix', '--header-buffer-size', type='int', default=0, metavar='NUMBER', help='Size of buffer used for reading ' 'response headers from daemon processes. Defaults to 0, ' 'indicating internal default of 32768 bytes is used.') add_option('unix', '--response-buffer-size', type='int', default=0, metavar='NUMBER', help='Maximum amount of response content ' 'that will be allowed to be buffered in the Apache child ' 'worker process when proxying the response from a daemon ' 'process. Defaults to 0, indicating internal default of ' '65536 bytes is used.') add_option('unix', '--response-socket-timeout', type='int', default=0, metavar='SECONDS', help='Maximum number of seconds allowed ' 'to pass before timing out on a write operation back to the ' 'HTTP client when the response buffer has filled and data is ' 'being forcibly flushed. Defaults to 0 seconds indicating that ' 'it will default to the value of the \'socket-timeout\' option.') add_option('all', '--enable-sendfile', action='store_true', default=False, help='Flag indicating whether sendfile() support ' 'should be enabled. Defaults to being disabled. This should ' 'only be enabled if the operating system kernel and file system ' 'type where files are hosted supports it.') add_option('unix', '--disable-reloading', action='store_true', default=False, help='Disables all reloading of daemon processes ' 'due to changes to the file containing the WSGI application ' 'entrypoint, or any other loaded source files. This has no ' 'effect when embedded mode is used as reloading is automatically ' 'disabled for embedded mode.') add_option('unix', '--reload-on-changes', action='store_true', default=False, help='Flag indicating whether worker processes ' 'should be automatically restarted when any Python code file ' 'loaded by the WSGI application has been modified. Defaults to ' 'being disabled. When reloading on any code changes is disabled, ' 'unless all reloading is also disabled, the worker processes ' 'will still though be reloaded if the file containing the WSGI ' 'application entrypoint is modified.') add_option('unix', '--user', default=default_run_user(), metavar='USERNAME', help='When being run by the root user, ' 'the user that the WSGI application should be run as.') add_option('unix', '--group', default=default_run_group(), metavar='GROUP', help='When being run by the root user, the ' 'group that the WSGI application should be run as.') add_option('all', '--callable-object', default='application', metavar='NAME', help='The name of the entry point for the WSGI ' 'application within the WSGI script file. Defaults to ' 'the name \'application\'.') add_option('all', '--map-head-to-get', default='Auto', metavar='OFF|ON|AUTO', help='Flag indicating whether HEAD ' 'requests should be mapped to a GET request. 
By default a HEAD ' 'request will be automatically mapped to a GET request when an ' 'Apache output filter is detected that may want to see the ' 'entire response in order to set up response headers correctly ' 'for a HEAD request. This can be disable by setting to \'Off\'.') add_option('all', '--document-root', metavar='DIRECTORY-PATH', help='The directory which should be used as the document root ' 'and which contains any static files.') add_option('all', '--directory-index', metavar='FILE-NAME', help='The name of a directory index resource to be found in the ' 'document root directory. Requests mapping to the directory ' 'will be mapped to this resource rather than being passed ' 'through to the WSGI application.') add_option('all', '--directory-listing', action='store_true', default=False, help='Flag indicating if directory listing ' 'should be enabled where static file application type is ' 'being used and no directory index file has been specified.') add_option('all', '--allow-override', metavar='DIRECTIVE-TYPE', action='append', help='Allow directives to be overridden from a ' '\'.htaccess\' file. Defaults to \'None\', indicating that any ' '\'.htaccess\' file will be ignored with override directives ' 'not being permitted.') add_option('all', '--mount-point', metavar='URL-PATH', default='/', help='The URL path at which the WSGI application will be ' 'mounted. Defaults to being mounted at the root URL of the ' 'site.') add_option('all', '--url-alias', action='append', nargs=2, dest='url_aliases', metavar='URL-PATH FILE-PATH|DIRECTORY-PATH', help='Map a single static file or a directory of static files ' 'to a sub URL.') add_option('all', '--error-document', action='append', nargs=2, dest='error_documents', metavar='STATUS URL-PATH', help='Map ' 'a specific sub URL as the handler for HTTP errors generated ' 'by the web server.') add_option('all', '--error-override', action='store_true', default=False, help='Flag indicating whether Apache error ' 'documents will override application error responses.') add_option('all', '--proxy-mount-point', action='append', nargs=2, dest='proxy_mount_points', metavar='URL-PATH URL', help='Map a sub URL such that any requests against it will be ' 'proxied to the specified URL. This is only for proxying to a ' 'site as a whole, or a sub site, not individual resources.') add_option('all', '--proxy-url-alias', action='append', nargs=2, dest='proxy_mount_points', metavar='URL-PATH URL', help=optparse.SUPPRESS_HELP) add_option('all', '--proxy-virtual-host', action='append', nargs=2, dest='proxy_virtual_hosts', metavar='HOSTNAME URL', help='Proxy any requests for the specified host name to the ' 'remote URL.') add_option('all', '--trust-proxy-header', action='append', default=[], dest='trusted_proxy_headers', metavar='HEADER-NAME', help='The name of any trusted HTTP header providing details ' 'of the front end client request when proxying.') add_option('all', '--trust-proxy', action='append', default=[], dest='trusted_proxies', metavar='IP-ADDRESS/SUBNET', help='The IP address or subnet corresponding to any trusted ' 'proxy.') add_option('all', '--keep-alive-timeout', type='int', default=2, metavar='SECONDS', help='The number of seconds which a client ' 'connection will be kept alive to allow subsequent requests ' 'to be made over the same connection when a keep alive ' 'connection is requested. 
Defaults to 2, indicating that keep ' 'alive connections are set for 2 seconds.') add_option('all', '--compress-responses', action='store_true', default=False, help='Flag indicating whether responses for ' 'common text based responses, such as plain text, HTML, XML, ' 'CSS and Javascript should be compressed.') add_option('all', '--server-metrics', action='store_true', default=False, help='Flag indicating whether internal server ' 'metrics will be available within the WSGI application. ' 'Defaults to being disabled.') add_option('all', '--server-status', action='store_true', default=False, help='Flag indicating whether web server status ' 'will be available at the /server-status sub URL. Defaults to ' 'being disabled.') add_option('all', '--host-access-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing host access checks.') add_option('all', '--auth-user-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing user authentication.') add_option('all', '--auth-type', metavar='TYPE', default='Basic', help='Specify the type of authentication ' 'scheme used when authenticating users. Defaults to using ' '\'Basic\'. Alternate schemes available are \'Digest\'.') add_option('all', '--auth-group-script', metavar='SCRIPT-PATH', default=None, help='Specify a Python script file for ' 'performing group based authorization in conjunction with ' 'a user authentication script.') add_option('all', '--auth-group', metavar='NAME', default='wsgi', help='Specify the group which users should ' 'be a member of when using a group based authorization script. ' 'Defaults to \'wsgi\' as a place holder but should be ' 'overridden to be the actual group you use rather than ' 'making your group name match the default.') add_option('all', '--include-file', action='append', dest='include_files', metavar='FILE-PATH', help='Specify the ' 'path to an additional web server configuration file to be ' 'included at the end of the generated web server configuration ' 'file.') add_option('all', '--rewrite-rules', metavar='FILE-PATH', help='Specify an alternate server configuration file which ' 'contains rewrite rules. Defaults to using the ' '\'rewrite.conf\' stored under the server root directory.') add_option('unix', '--envvars-script', metavar='FILE-PATH', help='Specify an alternate script file for user defined web ' 'server environment variables. Defaults to using the ' '\'envvars\' stored under the server root directory.') add_option('unix', '--lang', default=None, metavar='NAME', help=optparse.SUPPRESS_HELP) add_option('all', '--locale', default=None, metavar='NAME', help='Specify the natural language locale for the process ' 'as normally defined by the \'LC_ALL\' environment variable. ' 'If not specified, then the default locale for this process ' 'will be used. 
If the default locale is however \'C\' or ' '\'POSIX\' then an attempt will be made to use either the ' '\'en_US.UTF-8\' or \'C.UTF-8\' locales and if that is not ' 'possible only then fallback to the default locale of this ' 'process.') add_option('all', '--setenv', action='append', nargs=2, dest='setenv_variables', metavar='KEY VALUE', help='Specify ' 'a name/value pairs to be added to the per request WSGI environ ' 'dictionary') add_option('all', '--passenv', action='append', dest='passenv_variables', metavar='KEY', help='Specify the ' 'names of any process level environment variables which should ' 'be passed as a name/value pair in the per request WSGI ' 'environ dictionary.') add_option('all', '--working-directory', metavar='DIRECTORY-PATH', help='Specify the directory which should be used as the ' 'current working directory of the WSGI application. This ' 'directory will be searched when importing Python modules ' 'so long as the WSGI application doesn\'t subsequently ' 'change the current working directory. Defaults to the ' 'directory this script is run from.') add_option('all', '--pid-file', metavar='FILE-PATH', help='Specify an alternate file to be used to store the ' 'process ID for the root process of the web server.') add_option('all', '--server-root', metavar='DIRECTORY-PATH', help='Specify an alternate directory for where the generated ' 'web server configuration, startup files and logs will be ' 'stored. On Linux defaults to the sub directory specified by ' 'the TMPDIR environment variable, or /tmp if not specified. ' 'On macOS, defaults to the /var/tmp directory.') add_option('unix', '--server-mpm', action='append', dest='server_mpm_variables', metavar='NAME', help='Specify ' 'preferred MPM to use when using Apache 2.4 with dynamically ' 'loadable MPMs and more than one is available. By default ' 'the MPM precedence order when no preference is given is ' '\"event\", \"worker" and \"prefork\".') add_option('all', '--log-directory', metavar='DIRECTORY-PATH', help='Specify an alternate directory for where the log files ' 'will be stored. Defaults to the server root directory.') add_option('all', '--log-level', default='warn', metavar='NAME', help='Specify the log level for logging. Defaults to \'warn\'.') add_option('all', '--access-log', action='store_true', default=False, help='Flag indicating whether the web server access log ' 'should be enabled. Defaults to being disabled.') add_option('unix', '--startup-log', action='store_true', default=False, help='Flag indicating whether the web server startup log should ' 'be enabled. Defaults to being disabled.') add_option('all', '--verbose-debugging', action='store_true', dest='verbose_debugging', help=optparse.SUPPRESS_HELP) add_option('unix', '--log-to-terminal', action='store_true', default=False, help='Flag indicating whether logs should ' 'be directed back to the terminal. Defaults to being disabled. ' 'If --log-directory is set explicitly, it will override this ' 'option. 
If logging to the terminal is carried out, any ' 'rotating of log files will be disabled.') add_option('all', '--access-log-format', metavar='FORMAT', help='Specify the format of the access log records.'), add_option('all', '--error-log-format', metavar='FORMAT', help='Specify the format of the error log records.'), add_option('all', '--error-log-name', metavar='FILE-NAME', default='error_log', help='Specify the name of the error ' 'log file when it is being written to the log directory.'), add_option('all', '--access-log-name', metavar='FILE-NAME', default='access_log', help='Specify the name of the access ' 'log file when it is being written to the log directory.'), add_option('unix', '--startup-log-name', metavar='FILE-NAME', default='startup_log', help='Specify the name of the startup ' 'log file when it is being written to the log directory.'), add_option('unix', '--rotate-logs', action='store_true', default=False, help='Flag indicating whether log rotation should be performed.'), add_option('unix', '--max-log-size', default=5, type='int', metavar='MB', help='The maximum size in MB the log file should ' 'be allowed to reach before log file rotation is performed.'), add_option('unix', '--rotatelogs-executable', default=apxs_config.ROTATELOGS, metavar='FILE-PATH', help='Override the path to the rotatelogs executable.'), add_option('all', '--python-path', action='append', dest='python_paths', metavar='DIRECTORY-PATH', help='Specify ' 'the path to any additional directory that should be added to ' 'the Python module search path. Note that these directories will ' 'not be processed for \'.pth\' files. If processing of \'.pth\' ' 'files is required, set the \'PYTHONPATH\' environment variable ' 'in a script specified by the \'--envvars-script\' option.') add_option('all', '--python-eggs', metavar='DIRECTORY-PATH', help='Specify an alternate directory which should be used for ' 'unpacking of Python eggs. Defaults to a sub directory of ' 'the server root directory.') add_option('unix', '--shell-executable', default=SHELL, metavar='FILE-PATH', help='Override the path to the shell ' 'used in the \'apachectl\' script. The \'bash\' shell will ' 'be used if available.') add_option('unix', '--httpd-executable', default=apxs_config.HTTPD, metavar='FILE-PATH', help='Override the path to the Apache web ' 'server executable.') add_option('unix', '--process-name', metavar='NAME', help='Override ' 'the name given to the Apache parent process. 
This might be ' 'needed when a process manager expects the process to be named ' 'a certain way but due to a sequence of exec calls the name ' 'changed.') add_option('all', '--modules-directory', default=apxs_config.LIBEXECDIR, metavar='DIRECTORY-PATH', help='Override the path to the Apache ' 'web server modules directory.') add_option('unix', '--mime-types', default=find_mimetypes(), metavar='FILE-PATH', help='Override the path to the mime types ' 'file used by the web server.') add_option('unix', '--socket-prefix', metavar='DIRECTORY-PATH', help='Specify an alternate directory name prefix to be used ' 'for the UNIX domain sockets used by mod_wsgi to communicate ' 'between the Apache child processes and the daemon processes.') add_option('all', '--add-handler', action='append', nargs=2, dest='handler_scripts', metavar='EXTENSION SCRIPT-PATH', help='Specify a WSGI application to be used as a special ' 'handler for any resources matched from the document root ' 'directory with a specific extension type.') add_option('all', '--chunked-request', action='store_true', default=False, help='Flag indicating whether requests which ' 'use chunked transfer encoding will be accepted.') add_option('hidden', '--with-newrelic', action='store_true', default=False, help='Flag indicating whether all New Relic ' 'performance monitoring features should be enabled.') add_option('hidden', '--with-newrelic-agent', action='store_true', default=False, help='Flag indicating whether the New Relic ' 'Python agent should be enabled for reporting application server ' 'metrics.') add_option('hidden', '--with-newrelic-platform', action='store_true', default=False, help='Flag indicating whether the New Relic ' 'platform plugin should be enabled for reporting server level ' 'metrics.') add_option('hidden', '--newrelic-config-file', metavar='FILE-PATH', default='', help='Specify the location of the New Relic agent ' 'configuration file.') add_option('hidden', '--newrelic-environment', metavar='NAME', default='', help='Specify the name of the environment section ' 'that should be used from New Relic agent configuration file.') add_option('hidden', '--with-php5', action='store_true', default=False, help='Flag indicating whether PHP 5 support should be enabled. ' 'PHP code files must use the \'.php\' extension.') add_option('all', '--with-cgi', action='store_true', default=False, help='Flag indicating whether CGI script support should be ' 'enabled. CGI scripts must use the \'.cgi\' extension and be ' 'executable') add_option('unix', '--service-script', action='append', nargs=2, dest='service_scripts', metavar='SERVICE SCRIPT-PATH', help='Specify the name of a Python script to be loaded and ' 'executed in the context of a distinct daemon process. 
Used ' 'for running a managed service.') add_option('unix', '--service-user', action='append', nargs=2, dest='service_users', metavar='SERVICE USERNAME', help='When being run by the root user, the user that the ' 'distinct daemon process started to run the managed service ' 'should be run as.') add_option('unix', '--service-group', action='append', nargs=2, dest='service_groups', metavar='SERVICE GROUP', help='When being run by the root user, the group that the ' 'distinct daemon process started to run the managed service ' 'should be run as.') add_option('unix', '--service-log-file', action='append', nargs=2, dest='service_log_files', metavar='SERVICE FILE-NAME', help='Specify the name of a separate log file to be used for ' 'the managed service.') add_option('all', '--orphan-interpreter', action='store_true', default=False, help='Flag indicating whether should skip over ' 'destroying the Python interpreter on process shutdown.') add_option('unix', '--embedded-mode', action='store_true', default=False, help='Flag indicating whether to run in embedded mode rather ' 'than the default daemon mode. Numerous daemon mode specific ' 'features will not operate when this mode is used.') add_option('all', '--enable-docs', action='store_true', default=False, help='Flag indicating whether the mod_wsgi documentation should ' 'be made available at the /__wsgi__/docs sub URL.') add_option('unix', '--debug-mode', action='store_true', default=False, help='Flag indicating whether to run in single process mode ' 'to allow the running of an interactive Python debugger. This ' 'will override all options related to processes, threads and ' 'communication with workers. All forms of source code reloading ' 'will also be disabled. Both stdin and stdout will be attached ' 'to the console to allow interaction with the Python debugger.') add_option('unix', '--enable-debugger', action='store_true', default=False, help='Flag indicating whether post mortem ' 'debugging of any exceptions which propagate out from the ' 'WSGI application when running in debug mode should be ' 'performed. 
Post mortem debugging is performed using the ' 'Python debugger (pdb).'), add_option('unix', '--debugger-startup', action='store_true', default=False, help='Flag indicating whether when post ' 'mortem debugging is enabled, that the debugger should ' 'also be thrown into the interactive console on initial ' 'startup of the server to allow breakpoints to be setup.'), add_option('unix', '--enable-coverage', action='store_true', default=False, help='Flag indicating whether coverage analysis ' 'is enabled when running in debug mode.') add_option('unix', '--coverage-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which coverage analysis will be generated when enabled under ' 'debug mode.') add_option('unix', '--enable-profiler', action='store_true', default=False, help='Flag indicating whether code profiling ' 'is enabled when running in debug mode.') add_option('unix', '--profiler-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which profiler data will be written when enabled under debug ' 'mode.') add_option('unix', '--enable-recorder', action='store_true', default=False, help='Flag indicating whether recording of ' 'requests is enabled when running in debug mode.') add_option('unix', '--recorder-directory', metavar='DIRECTORY-PATH', default='', help='Override the path to the directory into ' 'which recorder data will be written when enabled under debug ' 'mode.') add_option('unix', '--enable-gdb', action='store_true', default=False, help='Flag indicating whether Apache should ' 'be run under \'gdb\' when running in debug mode. This ' 'would be use to debug process crashes.') add_option('unix', '--gdb-executable', default='gdb', metavar='FILE-PATH', help='Override the path to the gdb ' 'executable.') add_option('unix', '--setup-only', action='store_true', default=False, help='Flag indicating that after the configuration files have ' 'been setup, that the command should then exit and not go on ' 'to actually run up the Apache server. This is to allow for ' 'the generation of the configuration with Apache then later ' 'being started separately using the generated \'apachectl\' ' 'script.') # add_option('unix', '--isatty', action='store_true', default=False, # help='Flag indicating whether should assume being run in an ' # 'interactive terminal session. In this case Apache will not ' # 'replace this wrapper script, but will be run as a sub process.' # 'Signals such as SIGINT, SIGTERM, SIGHUP and SIGUSR1 will be ' # 'forwarded onto Apache, but SIGWINCH will be blocked so that ' # 'resizing of a terminal session window will not cause Apache ' # 'to shutdown. 
This is a separate option at this time rather ' # 'than being determined automatically while the reliability of ' # 'intercepting and forwarding signals is verified.') def cmd_setup_server(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog setup-server script [options]' parser = optparse.OptionParser(usage=usage, option_list=option_list, formatter=formatter) (options, args) = parser.parse_args(params) _cmd_setup_server('setup-server', args, vars(options)) def _mpm_module_defines(modules_directory, preferred=None): if os.name == 'nt': return ['-DMOD_WSGI_MPM_ENABLE_WINNT_MODULE'] result = [] workers = ['event', 'worker', 'prefork'] found = False for name in workers: if not preferred or name in preferred: if os.path.exists(os.path.join(modules_directory, 'mod_mpm_%s.so' % name)): if not found: result.append('-DMOD_WSGI_MPM_ENABLE_%s_MODULE' % name.upper()) found = True result.append('-DMOD_WSGI_MPM_EXISTS_%s_MODULE' % name.upper()) return result def _cmd_setup_server(command, args, options): options['sys_argv'] = repr(sys.argv) options['mod_wsgi_so'] = where() options['working_directory'] = options['working_directory'] or os.getcwd() options['working_directory'] = os.path.abspath(options['working_directory']) if not options['host']: options['listener_host'] = None options['host'] = 'localhost' else: options['listener_host'] = options['host'] if os.name == 'nt': options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'], options['port'], getpass.getuser()) else: options['daemon_name'] = '(wsgi:%s:%s:%s)' % (options['host'], options['port'], os.getuid()) if not options['server_root']: if os.name == 'nt': tmpdir = tempfile.gettempdir() elif sys.platform == 'darwin': tmpdir = '/var/tmp' else: tmpdir = os.environ.get('TMPDIR') tmpdir = tmpdir or '/tmp' tmpdir = tmpdir.rstrip('/') if os.name == 'nt': options['server_root'] = ('%s/mod_wsgi-%s-%s-%s' % (tmpdir, options['host'], options['port'], getpass.getuser()) ).replace('\\','/') else: options['server_root'] = '%s/mod_wsgi-%s:%s:%s' % (tmpdir, options['host'], options['port'], os.getuid()) if not os.path.isdir(options['server_root']): os.mkdir(options['server_root']) if options['ssl_certificate_file']: options['ssl_certificate_file'] = os.path.abspath( options['ssl_certificate_file']) if options['ssl_certificate_key_file']: options['ssl_certificate_key_file'] = os.path.abspath( options['ssl_certificate_key_file']) if options['ssl_certificate']: options['ssl_certificate'] = os.path.abspath( options['ssl_certificate']) options['ssl_certificate_file'] = options['ssl_certificate'] options['ssl_certificate_file'] += '.crt' options['ssl_certificate_key_file'] = options['ssl_certificate'] options['ssl_certificate_key_file'] += '.key' if options['ssl_ca_certificate_file']: options['ssl_ca_certificate_file'] = os.path.abspath( options['ssl_ca_certificate_file']) if options['ssl_certificate_chain_file']: options['ssl_certificate_chain_file'] = os.path.abspath( options['ssl_certificate_chain_file']) if options['entry_point']: args = [options['entry_point']] if not args: if options['application_type'] != 'static': options['entry_point'] = posixpath.join( options['server_root'], 'default.wsgi') options['application_type'] = 'script' options['enable_docs'] = True else: if not options['document_root']: options['document_root'] = os.getcwd() options['entry_point'] = '(static)' else: if options['application_type'] in ('script', 'paste'): options['entry_point'] = posixpath.abspath(args[0]) elif 
options['application_type'] == 'static': if not options['document_root']: options['document_root'] = posixpath.abspath(args[0]) options['entry_point'] = 'ignored' else: options['entry_point'] = 'overridden' else: options['entry_point'] = args[0] if options['host_access_script']: options['host_access_script'] = posixpath.abspath( options['host_access_script']) if options['auth_user_script']: options['auth_user_script'] = posixpath.abspath( options['auth_user_script']) if options['auth_group_script']: options['auth_group_script'] = posixpath.abspath( options['auth_group_script']) options['documentation_directory'] = os.path.join(os.path.dirname( os.path.dirname(__file__)), 'docs') options['images_directory'] = os.path.join(os.path.dirname( os.path.dirname(__file__)), 'images') if os.path.exists(posixpath.join(options['documentation_directory'], 'index.html')): options['documentation_url'] = '/__wsgi__/docs/' else: options['documentation_url'] = 'http://www.modwsgi.org/' if not os.path.isabs(options['server_root']): options['server_root'] = posixpath.abspath(options['server_root']) if not options['document_root']: options['document_root'] = posixpath.join(options['server_root'], 'htdocs') try: os.mkdir(options['document_root']) except Exception: pass if not options['allow_override']: options['allow_override'] = 'None' else: options['allow_override'] = ' '.join(options['allow_override']) if not options['mount_point'].startswith('/'): options['mount_point'] = posixpath.normpath('/' + options['mount_point']) # Create subdirectories for mount points in document directory # so that fallback resource rewrite rule will work. if options['mount_point'] != '/': parts = options['mount_point'].rstrip('/').split('/')[1:] subdir = options['document_root'] try: for part in parts: subdir = posixpath.join(subdir, part) if not os.path.exists(subdir): os.mkdir(subdir) except Exception: raise if not os.path.isabs(options['document_root']): options['document_root'] = posixpath.abspath(options['document_root']) if not options['log_directory']: options['log_directory'] = options['server_root'] else: # The --log-directory option overrides --log-to-terminal. options['log_to_terminal'] = False if options['log_to_terminal']: # The --log-to-terminal option overrides --rotate-logs. 
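# For illustration only (hypothetical command line): given
#   mod_wsgi-express start-server app.wsgi --log-to-terminal \
#       --rotate-logs --log-directory /var/log/wsgi
# the explicit --log-directory wins, --log-to-terminal is forced off and
# the error log is written to /var/log/wsgi/error_log. Without the
# --log-directory option, --log-to-terminal wins instead, --rotate-logs is
# forced off and errors go to /dev/stderr (or are piped through 'tee' when
# /dev/stderr cannot be opened for writing; 'CON' is used on Windows).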
options['rotate_logs'] = False try: os.mkdir(options['log_directory']) except Exception: pass if not os.path.isabs(options['log_directory']): options['log_directory'] = posixpath.abspath(options['log_directory']) if not options['log_to_terminal']: options['error_log_file'] = posixpath.join(options['log_directory'], options['error_log_name']) else: if os.name == 'nt': options['error_log_file'] = 'CON' else: try: with open('/dev/stderr', 'w'): pass except IOError: options['error_log_file'] = '|%s' % find_program( ['tee'], default='tee') else: options['error_log_file'] = '/dev/stderr' if not options['log_to_terminal']: options['access_log_file'] = posixpath.join( options['log_directory'], options['access_log_name']) else: try: with open('/dev/stdout', 'w'): pass except IOError: options['access_log_file'] = '|%s' % find_program( ['tee'], default='tee') else: options['access_log_file'] = '/dev/stdout' if options['access_log_format']: if options['access_log_format'] in ('common', 'combined'): options['log_format_nickname'] = options['access_log_format'] options['access_log_format'] = 'undefined' else: options['log_format_nickname'] = 'custom' else: options['log_format_nickname'] = 'common' options['access_log_format'] = 'undefined' options['access_log_format'] = options['access_log_format'].replace( '\"', '\\"') if options['error_log_format']: options['error_log_format'] = options['error_log_format'].replace( '\"', '\\"') options['pid_file'] = ((options['pid_file'] and posixpath.abspath( options['pid_file'])) or posixpath.join(options['server_root'], 'httpd.pid')) options['python_eggs'] = (posixpath.abspath(options['python_eggs']) if options['python_eggs'] is not None else None) if options['python_eggs'] is None: options['python_eggs'] = posixpath.join(options['server_root'], 'python-eggs') try: os.mkdir(options['python_eggs']) if os.name != 'nt' and os.getuid() == 0: import pwd import grp os.chown(options['python_eggs'], pwd.getpwnam(options['user']).pw_uid, grp.getgrnam(options['group']).gr_gid) except Exception: pass if options['python_paths'] is None: options['python_paths'] = [] if options['debug_mode'] or options['embedded_mode']: if options['working_directory'] not in options['python_paths']: options['python_paths'].insert(0, options['working_directory']) if options['debug_mode']: options['server_mpm_variables'] = ['worker', 'prefork'] elif options['embedded_mode']: if not options['server_mpm_variables']: options['server_mpm_variables'] = ['worker', 'prefork'] # Special case to check for when being executed from shiv variant # of a zipapp application bundle. We need to work out where the # site packages directory is and pass it with Python module search # path so is known about by the Apache sub process when executed. 
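# For illustration only (hypothetical paths): any site-packages directory
# recovered from the shiv bootstrap frame below is appended to
# options['python_paths'] and later joined into the colon separated
# options['python_path'] value handed to the Apache sub process, e.g.
#   ['/opt/app', '/home/user/.shiv/app/site-packages']
#       -> '/opt/app:/home/user/.shiv/app/site-packages'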
site_packages = [] if '_bootstrap' in sys.modules: bootstrap = sys.modules['_bootstrap'] if 'bootstrap' in dir(bootstrap): frame = inspect.currentframe() while frame is not None: code = frame.f_code if (code and code.co_filename == bootstrap.__file__ and code.co_name == 'bootstrap' and 'site_packages' in frame.f_locals): site_packages.append(str(frame.f_locals['site_packages'])) break frame = frame.f_back options['python_paths'].extend(site_packages) options['python_path'] = ':'.join(options['python_paths']) options['multiprocess'] = options['processes'] is not None options['processes'] = options['processes'] or 1 options['python_home'] = sys.prefix.replace('\\','/') options['keep_alive'] = options['keep_alive_timeout'] != 0 request_read_timeout = '' if options['header_timeout'] > 0: request_read_timeout += 'header=%d' % options['header_timeout'] if options['header_max_timeout'] > 0: request_read_timeout += '-%d' % options['header_max_timeout'] if options['header_min_rate'] > 0: request_read_timeout += ',MinRate=%d' % options['header_min_rate'] if options['body_timeout'] > 0: request_read_timeout += ' body=%d' % options['body_timeout'] if options['body_max_timeout'] > 0: request_read_timeout += '-%d' % options['body_max_timeout'] if options['body_min_rate'] > 0: request_read_timeout += ',MinRate=%d' % options['body_min_rate'] options['request_read_timeout'] = request_read_timeout if options['server_metrics']: options['server_metrics_flag'] = 'On' else: options['server_metrics_flag'] = 'Off' if options['handler_scripts']: handler_scripts = [] for extension, script in options['handler_scripts']: if not os.path.isabs(script): script = posixpath.abspath(script) handler_scripts.append((extension, script)) options['handler_scripts'] = handler_scripts if options['newrelic_config_file']: options['newrelic_config_file'] = posixpath.abspath( options['newrelic_config_file']) if options['with_newrelic']: options['with_newrelic_agent'] = True options['with_newrelic_platform'] = True if options['with_newrelic_platform']: options['server_metrics'] = True if options['service_scripts']: service_scripts = [] for name, script in options['service_scripts']: if not os.path.isabs(script): script = posixpath.abspath(script) service_scripts.append((name, script)) options['service_scripts'] = service_scripts # Node that all the below calculations are overridden if are using # embedded mode. 
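# Illustrative calculation (hypothetical invocation, not generated
# configuration): assuming --processes 2 with the default --threads 5 and
# no --max-clients, the values computed below work out as follows.
#
#   max_clients              = 10 + max(10, int(1.5 * 2 * 5))   # 25
#
#   prefork_max_clients      = 25    prefork_server_limit = 25
#   prefork_start_servers    = max(1, int(0.05 * 25))           # 1
#   prefork_min_spare        = max(1, int(0.05 * 25))           # 1
#   prefork_max_spare        = max(1, int(0.10 * 25))           # 2
#
#   worker_threads_per_child = int(25 / (int(25 / 20) + 1))     # 12
#   worker_server_limit      = ceil(25 / 12)                    # 3
#   worker_max_clients       = 3 * 12                           # 36
#   worker_start_servers     = max(1, int(0.2 * 3))             # 1
#   worker_min_spare_threads = max(12, int(0.2 * 3) * 12)       # 12
#   worker_max_spare_threads = max(12, int(0.6 * 3) * 12)       # 12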
max_clients = options['processes'] * options['threads'] if options['max_clients'] is not None: max_clients = max(options['max_clients'], max_clients) else: max_clients = 10 + max(10, int(1.5 * max_clients)) initial_workers = options['initial_workers'] min_spare_workers = options['minimum_spare_workers'] max_spare_workers = options['maximum_spare_workers'] if initial_workers is None: prefork_initial_workers = 0.05 else: prefork_initial_workers = initial_workers if min_spare_workers is None: prefork_min_spare_workers = prefork_initial_workers else: prefork_min_spare_workers = min_spare_workers if max_spare_workers is None: prefork_max_spare_workers = 0.1 else: prefork_max_spare_workers = max_spare_workers options['prefork_max_clients'] = max_clients options['prefork_server_limit'] = max_clients options['prefork_start_servers'] = max(1, int( prefork_initial_workers * max_clients)) options['prefork_min_spare_servers'] = max(1, int( prefork_min_spare_workers * max_clients)) options['prefork_max_spare_servers'] = max(1, int( prefork_max_spare_workers * max_clients)) if initial_workers is None: worker_initial_workers = 0.2 else: worker_initial_workers = initial_workers if min_spare_workers is None: worker_min_spare_workers = worker_initial_workers else: worker_min_spare_workers = min_spare_workers if max_spare_workers is None: worker_max_spare_workers = 0.6 else: worker_max_spare_workers = max_spare_workers options['worker_max_clients'] = max_clients if max_clients > 20: options['worker_threads_per_child'] = int(max_clients / (int(max_clients / 20) + 1)) else: options['worker_threads_per_child'] = 10 options['worker_thread_limit'] = options['worker_threads_per_child'] count = max_clients / options['worker_threads_per_child'] options['worker_server_limit'] = int(math.floor(count)) if options['worker_server_limit'] != count: options['worker_server_limit'] += 1 options['worker_max_clients'] = (options['worker_server_limit'] * options['worker_threads_per_child']) options['worker_start_servers'] = max(1, int(worker_initial_workers * options['worker_server_limit'])) options['worker_min_spare_threads'] = max( options['worker_threads_per_child'], int(worker_min_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) options['worker_max_spare_threads'] = max( options['worker_threads_per_child'], int(worker_max_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) if options['embedded_mode']: max_clients = options['processes'] * options['threads'] options['prefork_max_clients'] = max_clients options['prefork_server_limit'] = max_clients options['prefork_start_servers'] = max_clients options['prefork_min_spare_servers'] = max_clients options['prefork_max_spare_servers'] = max_clients options['worker_max_clients'] = max_clients options['worker_server_limit'] = options['processes'] options['worker_thread_limit'] = options['threads'] options['worker_threads_per_child'] = options['threads'] options['worker_start_servers'] = options['processes'] options['worker_min_spare_threads'] = max_clients options['worker_max_spare_threads'] = max_clients options['httpd_conf'] = posixpath.join(options['server_root'], 'httpd.conf') options['httpd_executable'] = os.environ.get('HTTPD', options['httpd_executable']) if os.name != 'nt': if not os.path.isabs(options['httpd_executable']): options['httpd_executable'] = find_program( [options['httpd_executable']], 'httpd', ['/usr/sbin']) if not options['process_name']: options['process_name'] = posixpath.basename( 
options['httpd_executable']) + ' (mod_wsgi-express)' options['process_name'] = options['process_name'].ljust( len(options['daemon_name'])) options['rewrite_rules'] = (posixpath.abspath( options['rewrite_rules']) if options['rewrite_rules'] is not None else None) options['envvars_script'] = (posixpath.abspath( options['envvars_script']) if options['envvars_script'] is not None else None) if options['locale'] is None: options['locale'] = options['lang'] if options['locale'] is None: language, encoding = locale.getdefaultlocale() if language is None: language = 'C' if encoding is None: options['locale'] = locale.normalize(language) else: options['locale'] = locale.normalize(language + '.' + encoding) if options['locale'].upper() in ('C', 'POSIX'): oldlocale = locale.setlocale(locale.LC_ALL) try: locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') options['locale'] = 'en_US.UTF-8' except locale.Error: try: locale.setlocale(locale.LC_ALL, 'C.UTF-8') options['locale'] = 'C.UTF-8' except locale.Error: pass locale.setlocale(locale.LC_ALL, oldlocale) options['lang'] = options['locale'] options['httpd_arguments_list'] = [] options['trusted_proxy_headers'] = ' '.join( options['trusted_proxy_headers']) options['trusted_proxies'] = ' '.join(options['trusted_proxies']) if options['startup_log']: if not options['log_to_terminal']: options['startup_log_file'] = posixpath.join( options['log_directory'], options['startup_log_name']) else: if os.name == 'nt': options['startup_log_file'] = 'CON' else: try: with open('/dev/stderr', 'w'): pass except IOError: try: with open('/dev/tty', 'w'): pass except IOError: options['startup_log_file'] = None else: options['startup_log_file'] = '/dev/tty' else: options['startup_log_file'] = '/dev/stderr' if options['startup_log_file']: options['httpd_arguments_list'].append('-E') options['httpd_arguments_list'].append(options['startup_log_file']) if options['verbose_debugging']: options['verbose_debugging_flag'] = 'On' else: options['verbose_debugging_flag'] = 'Off' if options['server_name']: host = options['server_name'] else: host = options['host'] options['server_host'] = host if options['port'] == 80: options['url'] = 'http://%s/' % host else: options['url'] = 'http://%s:%s/' % (host, options['port']) if options['https_port'] == 443: options['https_url'] = 'https://%s/' % host elif options['https_port'] is not None: options['https_url'] = 'https://%s:%s/' % (host, options['https_port']) else: options['https_url'] = None if options['orphan_interpreter']: options['httpd_arguments_list'].append('-DORPHAN_INTERPRETER') if options['embedded_mode']: options['httpd_arguments_list'].append('-DEMBEDDED_MODE') options['disable_reloading'] = True if any((options['enable_debugger'], options['enable_coverage'], options['enable_profiler'], options['enable_recorder'], options['enable_gdb'])): options['debug_mode'] = True if options['debug_mode']: options['httpd_arguments_list'].append('-DONE_PROCESS') if options['debug_mode']: if options['enable_coverage']: if not options['coverage_directory']: options['coverage_directory'] = posixpath.join( options['server_root'], 'htmlcov') else: options['coverage_directory'] = posixpath.abspath( options['coverage_directory']) try: os.mkdir(options['coverage_directory']) except Exception: pass if options['enable_profiler']: if not options['profiler_directory']: options['profiler_directory'] = posixpath.join( options['server_root'], 'pstats') else: options['profiler_directory'] = posixpath.abspath( options['profiler_directory']) try: 
os.mkdir(options['profiler_directory']) except Exception: pass if options['enable_recorder']: if not options['recorder_directory']: options['recorder_directory'] = posixpath.join( options['server_root'], 'archive') else: options['recorder_directory'] = posixpath.abspath( options['recorder_directory']) try: os.mkdir(options['recorder_directory']) except Exception: pass else: options['enable_debugger'] = False options['enable_coverage'] = False options['enable_profiler'] = False options['enable_recorder'] = False options['enable_gdb'] = False options['parent_domain'] = 'unspecified' if options['server_name']: options['httpd_arguments_list'].append('-DMOD_WSGI_VIRTUAL_HOST') if options['server_name'].lower().startswith('www.'): options['httpd_arguments_list'].append('-DMOD_WSGI_REDIRECT_WWW') options['parent_domain'] = options['server_name'][4:] if options['http2']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTP2') if (options['https_port'] and options['ssl_certificate_file'] and options['ssl_certificate_key_file']): options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_HTTPS') if options['ssl_ca_certificate_file']: options['httpd_arguments_list'].append('-DMOD_WSGI_VERIFY_CLIENT') if options['ssl_certificate_chain_file']: options['httpd_arguments_list'].append('-DMOD_WSGI_CERTIFICATE_CHAIN') if options['ssl_environment']: options['httpd_arguments_list'].append('-DMOD_WSGI_SSL_ENVIRONMENT') if options['https_only']: options['httpd_arguments_list'].append('-DMOD_WSGI_HTTPS_ONLY') if options['hsts_policy']: options['httpd_arguments_list'].append('-DMOD_WSGI_HSTS_POLICY') if options['server_aliases']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_ALIAS') options['server_aliases'] = ' '.join(options['server_aliases']) if options['allow_localhost']: options['httpd_arguments_list'].append('-DMOD_WSGI_ALLOW_LOCALHOST') if options['application_type'] == 'static': options['httpd_arguments_list'].append('-DMOD_WSGI_STATIC_ONLY') if options['enable_sendfile']: options['httpd_arguments_list'].append('-DMOD_WSGI_ENABLE_SENDFILE') if options['server_metrics']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS') if options['server_status']: options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_METRICS') options['httpd_arguments_list'].append('-DMOD_WSGI_SERVER_STATUS') if options['directory_index']: options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_INDEX') if options['directory_listing']: options['httpd_arguments_list'].append('-DMOD_WSGI_DIRECTORY_LISTING') if options['error_log_format']: options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_LOG_FORMAT') if options['access_log']: options['httpd_arguments_list'].append('-DMOD_WSGI_ACCESS_LOG') if options['rotate_logs']: options['httpd_arguments_list'].append('-DMOD_WSGI_ROTATE_LOGS') if options['keep_alive'] != 0: options['httpd_arguments_list'].append('-DMOD_WSGI_KEEP_ALIVE') if options['compress_responses'] != 0: options['httpd_arguments_list'].append('-DMOD_WSGI_COMPRESS_RESPONSES') if options['multiprocess']: options['httpd_arguments_list'].append('-DMOD_WSGI_MULTIPROCESS') if options['listener_host']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_LISTENER_HOST') if options['error_override']: options['httpd_arguments_list'].append('-DMOD_WSGI_ERROR_OVERRIDE') if options['host_access_script']: options['httpd_arguments_list'].append('-DMOD_WSGI_HOST_ACCESS') if options['auth_user_script']: options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_USER') if options['auth_group_script']: 
options['httpd_arguments_list'].append('-DMOD_WSGI_AUTH_GROUP') if options['chunked_request']: options['httpd_arguments_list'].append('-DMOD_WSGI_CHUNKED_REQUEST') if options['with_php5']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PHP5') if options['proxy_mount_points'] or options['proxy_virtual_hosts']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY') if options['trusted_proxy_headers']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PROXY_HEADERS') if options['trusted_proxies']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_TRUSTED_PROXIES') if options['python_path']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_PYTHON_PATH') if options['socket_prefix']: options['httpd_arguments_list'].append('-DMOD_WSGI_WITH_SOCKET_PREFIX') if options['disable_reloading']: options['httpd_arguments_list'].append('-DMOD_WSGI_DISABLE_RELOADING') if options['with_cgi']: if os.path.exists(posixpath.join(options['modules_directory'], 'mod_cgid.so')): options['httpd_arguments_list'].append('-DMOD_WSGI_CGID_SCRIPT') else: options['httpd_arguments_list'].append('-DMOD_WSGI_CGI_SCRIPT') options['httpd_arguments_list'].extend( _mpm_module_defines(options['modules_directory'], options['server_mpm_variables'])) options['python_executable'] = sys.executable options['shlibpath_var'] = apxs_config.SHLIBPATH_VAR options['shlibpath'] = apxs_config.SHLIBPATH if _py_dylib: options['httpd_arguments_list'].append('-DMOD_WSGI_LOAD_PYTHON_DYLIB') options['python_dylib'] = _py_dylib options['httpd_arguments'] = '-f %s %s' % (options['httpd_conf'], ' '.join(options['httpd_arguments_list'])) generate_wsgi_handler_script(options) if options['with_newrelic_platform']: generate_server_metrics_script(options) print('Server URL :', options['url']) if options['https_url']: print('Server URL (HTTPS) :', options['https_url']) if options['server_status']: print('Server Status :', '%sserver-status' % options['url']) print('Server Root :', options['server_root']) print('Server Conf :', options['httpd_conf']) print('Error Log File : %s (%s)' % (options['error_log_file'], options['log_level'])) if options['access_log']: print('Access Log File :', options['access_log_file']) if options['startup_log']: print('Startup Log File :', options['startup_log_file']) if options['enable_coverage']: print('Coverage Output :', posixpath.join( options['coverage_directory'], 'index.html')) if options['enable_profiler']: print('Profiler Output :', options['profiler_directory']) if options['enable_recorder']: print('Recorder Output :', options['recorder_directory']) if options['rewrite_rules']: print('Rewrite Rules :', options['rewrite_rules']) if os.name != 'nt': if options['envvars_script']: print('Environ Variables :', options['envvars_script']) if command == 'setup-server' or options['setup_only']: if not options['rewrite_rules']: print('Rewrite Rules :', options['server_root'] + '/rewrite.conf') if os.name != 'nt': if not options['envvars_script']: print('Environ Variables :', options['server_root'] + '/envvars') print('Control Script :', options['server_root'] + '/apachectl') if options['debug_mode']: print('Operating Mode : debug') elif options['embedded_mode']: print('Operating Mode : embedded') else: print('Operating Mode : daemon') if options['processes'] == 1: print('Request Capacity : %s (%s process * %s threads)' % ( options['processes']*options['threads'], options['processes'], options['threads'])) else: print('Request Capacity : %s (%s processes * %s threads)' % ( 
options['processes']*options['threads'], options['processes'], options['threads'])) if not options['debug_mode'] and not options['embedded_mode']: print('Request Timeout : %s (seconds)' % options['request_timeout']) if options['startup_timeout']: print('Startup Timeout : %s (seconds)' % options['startup_timeout']) print('Queue Backlog : %s (connections)' % options['daemon_backlog']) print('Queue Timeout : %s (seconds)' % options['queue_timeout']) print('Server Capacity : %s (event/worker), %s (prefork)' % ( options['worker_max_clients'], options['prefork_max_clients'])) print('Server Backlog : %s (connections)' % options['server_backlog']) print('Locale Setting :', options['locale']) sys.stdout.flush() if not options['rewrite_rules']: options['rewrite_rules'] = options['server_root'] + '/rewrite.conf' if not os.path.isfile(options['rewrite_rules']): with open(options['rewrite_rules'], 'w') as fp: pass generate_apache_config(options) if os.name != 'nt': generate_control_scripts(options) return options def cmd_start_server(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog start-server script [options]' parser = optparse.OptionParser(usage=usage, option_list=option_list, formatter=formatter) (options, args) = parser.parse_args(params) config = _cmd_setup_server('start-server', args, vars(options)) if config['setup_only']: return if os.name == 'nt': print() print("WARNING: The ability to use the start-server option on Windows") print("WARNING: is highly experimental and various things don't quite") print("WARNING: work properly. If you understand a lot about using") print("WARNING: Python on Windows and Windows programming in general,") print("WARNING: and would like to help to get it working properly, then") print("WARNING: you can ask about Windows support for the start-server") print("WARNING: option on the mod_wsgi mailing list.") print() executable = config['httpd_executable'] environ = copy.deepcopy(os.environ) environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory'] httpd_arguments = list(config['httpd_arguments_list']) httpd_arguments.extend(['-f', config['httpd_conf']]) httpd_arguments.extend(['-DONE_PROCESS']) os.environ['MOD_WSGI_MODULES_DIRECTORY'] = config['modules_directory'] subprocess.call([executable]+httpd_arguments) sys.exit(0) else: executable = posixpath.join(config['server_root'], 'apachectl') if sys.stdout.isatty() and not config['debug_mode']: process = None def handler(signum, frame): if process is None: sys.exit(1) else: if signum not in [signal.SIGWINCH]: os.kill(process.pid, signum) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGHUP, handler) signal.signal(signal.SIGUSR1, handler) signal.signal(signal.SIGWINCH, handler) process = subprocess.Popen([executable, 'start', '-DFOREGROUND'], preexec_fn=os.setpgrp) process.wait() else: os.execl(executable, executable, 'start', '-DFOREGROUND') def cmd_module_config(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog module-config' parser = optparse.OptionParser(usage=usage, formatter=formatter) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') if os.name == 'nt': real_prefix = getattr(sys, 'real_prefix', None) base_prefix = getattr(sys, 'base_prefix', None) real_prefix = real_prefix or base_prefix or sys.prefix library_version = sysconfig.get_config_var('VERSION') library_name = 
'python%s.dll' % library_version library_path = posixpath.join(real_prefix, library_name) if not os.path.exists(library_path): library_name = 'python%s.dll' % library_version[0] library_path = posixpath.join(real_prefix, 'DLLs', library_name) if not os.path.exists(library_path): library_path = None if library_path: library_path = posixpath.normpath(library_path) library_path = library_path.replace('\\', '/') print('LoadFile "%s"' % library_path) module_path = where() module_path = module_path.replace('\\', '/') prefix = sys.prefix prefix = posixpath.normpath(prefix) prefix = prefix.replace('\\', '/') print('LoadModule wsgi_module "%s"' % module_path) print('WSGIPythonHome "%s"' % prefix) else: module_path = where() prefix = sys.prefix prefix = posixpath.normpath(prefix) if _py_dylib: print('LoadFile "%s"' % _py_dylib) print('LoadModule wsgi_module "%s"' % module_path) print('WSGIPythonHome "%s"' % prefix) def cmd_install_module(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog install-module [options]' parser = optparse.OptionParser(usage=usage, formatter=formatter) parser.add_option('--modules-directory', metavar='DIRECTORY', default=apxs_config.LIBEXECDIR) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') target = posixpath.abspath(posixpath.join(options.modules_directory, posixpath.basename(MOD_WSGI_SO))) shutil.copyfile(where(), target) if _py_dylib: print('LoadFile "%s"' % _py_dylib) print('LoadModule wsgi_module "%s"' % target) print('WSGIPythonHome "%s"' % posixpath.normpath(sys.prefix)) def cmd_module_location(params): formatter = optparse.IndentedHelpFormatter() formatter.set_long_opt_delimiter(' ') usage = '%prog module-location' parser = optparse.OptionParser(usage=usage, formatter=formatter) (options, args) = parser.parse_args(params) if len(args) != 0: parser.error('Incorrect number of arguments.') print(where()) if os.name == 'nt': main_usage=""" %prog command [params] Commands: module-config module-location """ else: main_usage=""" %prog command [params] Commands: install-module module-config module-location setup-server start-server """ def main(): parser = optparse.OptionParser(main_usage.strip()) args = sys.argv[1:] if not args: parser.error('No command was specified.') command = args.pop(0) args = [os.path.expandvars(arg) for arg in args] if os.name == 'nt': if command == 'module-config': cmd_module_config(args) elif command == 'module-location': cmd_module_location(args) elif command == 'start-server': cmd_start_server(args) else: parser.error('Invalid command was specified.') else: if command == 'install-module': cmd_install_module(args) elif command == 'module-config': cmd_module_config(args) elif command == 'module-location': cmd_module_location(args) elif command == 'setup-server': cmd_setup_server(args) elif command == 'start-server': cmd_start_server(args) else: parser.error('Invalid command was specified.') def start(*args): cmd_start_server(list(args)) if __name__ == '__main__': main() mod_wsgi-5.0.0/src/server/environ.py000066400000000000000000000067531452636074700175040ustar00rootroot00000000000000from __future__ import print_function import os import sys import locale try: from cStringIO import StringIO except ImportError: from io import StringIO import mod_wsgi import apache def application(environ, start_response): headers = [] headers.append(('Content-Type', 'text/plain; charset="UTF-8"')) write = start_response('200 OK', headers) input = 
environ['wsgi.input'] output = StringIO() print('PID: %s' % os.getpid(), file=output) print('UID: %s' % os.getuid(), file=output) print('GID: %s' % os.getgid(), file=output) print('CWD: %s' % os.getcwd(), file=output) print(file=output) print('python.version: %r' % (sys.version,), file=output) print('python.prefix: %r' % (sys.prefix,), file=output) print('python.path: %r' % (sys.path,), file=output) print(file=output) print('apache.version: %r' % (apache.version,), file=output) print('mod_wsgi.version: %r' % (mod_wsgi.version,), file=output) print(file=output) print('mod_wsgi.process_group: %s' % mod_wsgi.process_group, file=output) print('mod_wsgi.application_group: %s' % mod_wsgi.application_group, file=output) print(file=output) print('mod_wsgi.maximum_processes: %s' % mod_wsgi.maximum_processes, file=output) print('mod_wsgi.threads_per_process: %s' % mod_wsgi.threads_per_process, file=output) print('mod_wsgi.process_metrics: %s' % mod_wsgi.process_metrics(), file=output) print('mod_wsgi.server_metrics: %s' % mod_wsgi.server_metrics(), file=output) print(file=output) metrics = mod_wsgi.server_metrics() if metrics: for process in metrics['processes']: for worker in process['workers']: print(worker['status'], file=output, end='') print(file=output) print(file=output) print('apache.description: %s' % apache.description, file=output) print('apache.build_date: %s' % apache.build_date, file=output) print('apache.mpm_name: %s' % apache.mpm_name, file=output) print('apache.maximum_processes: %s' % apache.maximum_processes, file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) print(file=output) print('PATH: %s' % sys.path, file=output) print(file=output) print('LANG: %s' % os.environ.get('LANG'), file=output) print('LC_ALL: %s' % os.environ.get('LC_ALL'), file=output) print('sys.getdefaultencoding(): %s' % sys.getdefaultencoding(), file=output) print('sys.getfilesystemencoding(): %s' % sys.getfilesystemencoding(), file=output) print('locale.getlocale(): %s' % (locale.getlocale(),), file=output) print('locale.getdefaultlocale(): %s' % (locale.getdefaultlocale(),), file=output) print('locale.getpreferredencoding(): %s' % locale.getpreferredencoding(), file=output) print(file=output) keys = sorted(environ.keys()) for key in keys: print('%s: %s' % (key, repr(environ[key])), file=output) print(file=output) keys = sorted(os.environ.keys()) for key in keys: print('%s: %s' % (key, repr(os.environ[key])), file=output) print(file=output) result = output.getvalue() if not isinstance(result, bytes): result = result.encode('UTF-8') yield result block_size = 8192 data = input.read(block_size) while data: yield data data = input.read(block_size) mod_wsgi-5.0.0/src/server/management/000077500000000000000000000000001452636074700175535ustar00rootroot00000000000000mod_wsgi-5.0.0/src/server/management/__init__.py000066400000000000000000000000001452636074700216520ustar00rootroot00000000000000mod_wsgi-5.0.0/src/server/management/commands/000077500000000000000000000000001452636074700213545ustar00rootroot00000000000000mod_wsgi-5.0.0/src/server/management/commands/__init__.py000066400000000000000000000000001452636074700234530ustar00rootroot00000000000000mod_wsgi-5.0.0/src/server/management/commands/runmodwsgi.py000066400000000000000000000142621452636074700241310ustar00rootroot00000000000000import os import sys import inspect import signal import subprocess from django.core.management.base import BaseCommand import mod_wsgi.server def check_percentage(string): if value is not 
None and value < 0 or value > 1: import argparse msg = '%s option value needs to be within the range 0 to 1.' % string raise argparse.ArgumentTypeError(msg) return value class Command(BaseCommand): args = '' help = 'Starts Apache/mod_wsgi web server.' if hasattr(BaseCommand, 'option_list'): # Used prior to Django 1.10. option_list = BaseCommand.option_list + mod_wsgi.server.option_list else: # This horrible mess tries to convert optparse option list to # argparse as required by Django 1.10+. We can't switch to # using argparse as need to still support Python 2.6, which # lacks the argparse module. def add_arguments(self, parser): ignore = set(['const', 'callback', 'callback_args', 'callback_kwargs']) types = { 'int': int, 'string': str } for option in mod_wsgi.server.option_list: opts = option._short_opts + option._long_opts kwargs = {} for attr in option.ATTRS: if attr not in ignore and hasattr(option, attr): if attr == 'type': if getattr(option, attr) in types: kwargs[attr] = types[getattr(option, attr)] elif attr == 'default': if getattr(option, attr) != ('NO', 'DEFAULT'): kwargs[attr] = getattr(option, attr) else: if getattr(option, attr) is not None: kwargs[attr] = getattr(option, attr) if (kwargs.get('action') == 'callback' and option.callback.__name__ == 'check_percentage'): del kwargs['action'] kwargs['type'] = check_percentage if kwargs.get('nargs') == 1: del kwargs['nargs'] parser.add_argument(*opts, **kwargs) def handle(self, *args, **options): self.stdout.write('Successfully ran command.') from django.conf import settings wsgi_application = settings.WSGI_APPLICATION fields = wsgi_application.split('.') module_name = '.'.join(fields[:-1]) callable_object = fields[-1] # XXX Can't test import as loading the WSGI module may have # side effects and run things that should only be run inside # of the mod_wsgi process. # # __import__(module_name) options['application_type'] = 'module' options['callable_object'] = callable_object args = [module_name] # If there is no BASE_DIR in Django settings, assume that the # current working directory is the parent directory of the # directory the settings module is in. Either way, allow the # --working-directory option to override it to deal with where # meaning of BASE_DIR in the Django settings was changed. if options.get('working_directory') is None: if hasattr(settings, 'BASE_DIR'): options['working_directory'] = settings.BASE_DIR else: settings_module_path = os.environ['DJANGO_SETTINGS_MODULE'] root_module_path = settings_module_path.split('.')[0] root_module = sys.modules[root_module_path] parent = os.path.dirname(os.path.dirname(root_module.__file__)) options['working_directory'] = parent url_aliases = options.setdefault('url_aliases') or [] try: middleware = getattr(settings, 'MIDDLEWARE', None) if middleware is None: middleware = getattr(settings, 'MIDDLEWARE_CLASSES', []) if 'whitenoise.middleware.WhiteNoiseMiddleware' not in middleware: if settings.STATIC_URL and settings.STATIC_URL.startswith('/'): if settings.STATIC_ROOT: # We need a fiddle here as depending on the Python # version used, the list of URL aliases we are # passed could be either list of tuples or list of # lists. We need to ensure we use the same type so # that sorting of items in the lists works later. 
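                    # Illustrative example (not part of the original source): with
                    # STATIC_URL = '/static/' and STATIC_ROOT = '/var/www/example/static',
                    # the entry inserted below is ('/static', '/var/www/example/static'),
                    # so the collected static files are served directly by the web
                    # server rather than by the Django application.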
if not url_aliases: url_aliases.insert(0, ( settings.STATIC_URL.rstrip('/') or '/', settings.STATIC_ROOT)) else: url_aliases.insert(0, type(url_aliases[0])(( settings.STATIC_URL.rstrip('/') or '/', settings.STATIC_ROOT))) except AttributeError: pass options['url_aliases'] = url_aliases options = mod_wsgi.server._cmd_setup_server( 'start-server', args, options) if options['setup_only']: return executable = os.path.join(options['server_root'], 'apachectl') name = executable.ljust(len(options['process_name'])) if sys.stdout.isatty() and not options['debug_mode']: process = None def handler(signum, frame): if process is None: sys.exit(1) else: if signum not in [signal.SIGWINCH]: os.kill(process.pid, signum) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGHUP, handler) signal.signal(signal.SIGUSR1, handler) signal.signal(signal.SIGWINCH, handler) process = subprocess.Popen([executable, 'start', '-DFOREGROUND'], preexec_fn=os.setpgrp) process.wait() else: os.execl(executable, name, 'start', '-DFOREGROUND') mod_wsgi-5.0.0/src/server/mod_wsgi.c000066400000000000000000017670641452636074700174400ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_apache.h" #include "wsgi_python.h" #ifdef HAVE_SYS_PRCTL_H #include #endif #ifndef WIN32 #include #endif static PyTypeObject Auth_Type; #if AP_SERVER_MINORVERSION_NUMBER >= 2 #define MOD_WSGI_WITH_AUTHN_PROVIDER 1 #endif #if AP_MODULE_MAGIC_AT_LEAST(20060110,0) #define MOD_WSGI_WITH_AUTHZ_PROVIDER 1 #if AP_MODULE_MAGIC_AT_LEAST(20100919,0) #define MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED 1 #endif #endif #if defined(MOD_WSGI_WITH_AUTHN_PROVIDER) #include "mod_auth.h" #include "ap_provider.h" #ifndef AUTHN_PROVIDER_VERSION #define AUTHN_PROVIDER_VERSION "0" #endif #endif /* Local project header files. */ #include "wsgi_version.h" #include "wsgi_convert.h" #include "wsgi_validate.h" #include "wsgi_interp.h" #include "wsgi_server.h" #include "wsgi_logger.h" #include "wsgi_restrict.h" #include "wsgi_stream.h" #include "wsgi_metrics.h" #include "wsgi_daemon.h" #include "wsgi_buckets.h" #include "wsgi_thread.h" /* Module information. */ module AP_MODULE_DECLARE_DATA wsgi_module; /* Process information. */ static int wsgi_multiprocess = 1; static int wsgi_multithread = 1; /* Daemon information. 
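 *
 * Note, based on how these variables are used further below in this file:
 * the wsgi_*_timeout values hold configured intervals for daemon process
 * groups, while the matching wsgi_*_shutdown_time values hold absolute
 * deadlines recalculated as apr_time_now() + timeout (see for example the
 * idle timeout handling in Input_read() and Adapter_output()).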
*/ static apr_array_header_t *wsgi_daemon_list = NULL; static apr_pool_t *wsgi_parent_pool = NULL; int volatile wsgi_daemon_shutdown = 0; static int volatile wsgi_daemon_graceful = 0; static int wsgi_dump_stack_traces = 0; static char *wsgi_shutdown_reason = ""; #if defined(MOD_WSGI_WITH_DAEMONS) static apr_interval_time_t wsgi_startup_timeout = 0; static apr_interval_time_t wsgi_deadlock_timeout = 0; static apr_interval_time_t wsgi_idle_timeout = 0; static apr_interval_time_t wsgi_request_timeout = 0; static apr_interval_time_t wsgi_graceful_timeout = 0; static apr_interval_time_t wsgi_eviction_timeout = 0; static apr_interval_time_t wsgi_restart_interval = 0; static apr_time_t volatile wsgi_startup_shutdown_time = 0; static apr_time_t volatile wsgi_deadlock_shutdown_time = 0; static apr_time_t volatile wsgi_idle_shutdown_time = 0; static apr_time_t volatile wsgi_graceful_shutdown_time = 0; static apr_time_t volatile wsgi_restart_shutdown_time = 0; #endif /* Script information. */ static apr_array_header_t *wsgi_import_list = NULL; static void *wsgi_create_server_config(apr_pool_t *p, server_rec *s) { WSGIServerConfig *config = NULL; config = newWSGIServerConfig(p); return config; } static void *wsgi_merge_server_config(apr_pool_t *p, void *base_conf, void *new_conf) { WSGIServerConfig *config = NULL; WSGIServerConfig *parent = NULL; WSGIServerConfig *child = NULL; config = newWSGIServerConfig(p); parent = (WSGIServerConfig *)base_conf; child = (WSGIServerConfig *)new_conf; if (child->alias_list && parent->alias_list) { config->alias_list = apr_array_append(p, child->alias_list, parent->alias_list); } else if (child->alias_list) { config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry)); apr_array_cat(config->alias_list, child->alias_list); } else if (parent->alias_list) { config->alias_list = apr_array_make(p, 20, sizeof(WSGIAliasEntry)); apr_array_cat(config->alias_list, parent->alias_list); } if (child->restrict_process) config->restrict_process = child->restrict_process; else config->restrict_process = parent->restrict_process; if (child->process_group) config->process_group = child->process_group; else config->process_group = parent->process_group; if (child->application_group) config->application_group = child->application_group; else config->application_group = parent->application_group; if (child->callable_object) config->callable_object = child->callable_object; else config->callable_object = parent->callable_object; if (child->dispatch_script) config->dispatch_script = child->dispatch_script; else config->dispatch_script = parent->dispatch_script; if (child->pass_apache_request != -1) config->pass_apache_request = child->pass_apache_request; else config->pass_apache_request = parent->pass_apache_request; if (child->pass_authorization != -1) config->pass_authorization = child->pass_authorization; else config->pass_authorization = parent->pass_authorization; if (child->script_reloading != -1) config->script_reloading = child->script_reloading; else config->script_reloading = parent->script_reloading; if (child->error_override != -1) config->error_override = child->error_override; else config->error_override = parent->error_override; if (child->chunked_request != -1) config->chunked_request = child->chunked_request; else config->chunked_request = parent->chunked_request; if (child->map_head_to_get != -1) config->map_head_to_get = child->map_head_to_get; else config->map_head_to_get = parent->map_head_to_get; if (child->ignore_activity != -1) config->ignore_activity = 
child->ignore_activity; else config->ignore_activity = parent->ignore_activity; if (child->trusted_proxy_headers) config->trusted_proxy_headers = child->trusted_proxy_headers; else config->trusted_proxy_headers = parent->trusted_proxy_headers; if (child->trusted_proxies) config->trusted_proxies = child->trusted_proxies; else config->trusted_proxies = parent->trusted_proxies; if (child->enable_sendfile != -1) config->enable_sendfile = child->enable_sendfile; else config->enable_sendfile = parent->enable_sendfile; if (!child->handler_scripts) config->handler_scripts = parent->handler_scripts; else if (!parent->handler_scripts) config->handler_scripts = child->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, child->handler_scripts, parent->handler_scripts); } return config; } typedef struct { apr_pool_t *pool; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int map_head_to_get; int ignore_activity; apr_array_header_t *trusted_proxy_headers; apr_array_header_t *trusted_proxies; int enable_sendfile; WSGIScriptFile *access_script; WSGIScriptFile *auth_user_script; WSGIScriptFile *auth_group_script; int user_authoritative; int group_authoritative; apr_hash_t *handler_scripts; } WSGIDirectoryConfig; static WSGIDirectoryConfig *newWSGIDirectoryConfig(apr_pool_t *p) { WSGIDirectoryConfig *object = NULL; object = (WSGIDirectoryConfig *)apr_pcalloc(p, sizeof(WSGIDirectoryConfig)); object->pool = p; object->process_group = NULL; object->application_group = NULL; object->callable_object = NULL; object->dispatch_script = NULL; object->pass_apache_request = -1; object->pass_authorization = -1; object->script_reloading = -1; object->error_override = -1; object->chunked_request = -1; object->map_head_to_get = -1; object->ignore_activity = -1; object->trusted_proxy_headers = NULL; object->trusted_proxies = NULL; object->enable_sendfile = -1; object->access_script = NULL; object->auth_user_script = NULL; object->auth_group_script = NULL; object->user_authoritative = -1; object->group_authoritative = -1; return object; } static void *wsgi_create_dir_config(apr_pool_t *p, char *dir) { WSGIDirectoryConfig *config = NULL; config = newWSGIDirectoryConfig(p); return config; } static void *wsgi_merge_dir_config(apr_pool_t *p, void *base_conf, void *new_conf) { WSGIDirectoryConfig *config = NULL; WSGIDirectoryConfig *parent = NULL; WSGIDirectoryConfig *child = NULL; config = newWSGIDirectoryConfig(p); parent = (WSGIDirectoryConfig *)base_conf; child = (WSGIDirectoryConfig *)new_conf; if (child->restrict_process) config->restrict_process = child->restrict_process; else config->restrict_process = parent->restrict_process; if (child->process_group) config->process_group = child->process_group; else config->process_group = parent->process_group; if (child->application_group) config->application_group = child->application_group; else config->application_group = parent->application_group; if (child->callable_object) config->callable_object = child->callable_object; else config->callable_object = parent->callable_object; if (child->dispatch_script) config->dispatch_script = child->dispatch_script; else config->dispatch_script = parent->dispatch_script; if (child->pass_apache_request != -1) config->pass_apache_request = child->pass_apache_request; else config->pass_apache_request = 
parent->pass_apache_request; if (child->pass_authorization != -1) config->pass_authorization = child->pass_authorization; else config->pass_authorization = parent->pass_authorization; if (child->script_reloading != -1) config->script_reloading = child->script_reloading; else config->script_reloading = parent->script_reloading; if (child->error_override != -1) config->error_override = child->error_override; else config->error_override = parent->error_override; if (child->chunked_request != -1) config->chunked_request = child->chunked_request; else config->chunked_request = parent->chunked_request; if (child->map_head_to_get != -1) config->map_head_to_get = child->map_head_to_get; else config->map_head_to_get = parent->map_head_to_get; if (child->ignore_activity != -1) config->ignore_activity = child->ignore_activity; else config->ignore_activity = parent->ignore_activity; if (child->trusted_proxy_headers) config->trusted_proxy_headers = child->trusted_proxy_headers; else config->trusted_proxy_headers = parent->trusted_proxy_headers; if (child->trusted_proxies) config->trusted_proxies = child->trusted_proxies; else config->trusted_proxies = parent->trusted_proxies; if (child->enable_sendfile != -1) config->enable_sendfile = child->enable_sendfile; else config->enable_sendfile = parent->enable_sendfile; if (child->access_script) config->access_script = child->access_script; else config->access_script = parent->access_script; if (child->auth_user_script) config->auth_user_script = child->auth_user_script; else config->auth_user_script = parent->auth_user_script; if (child->auth_group_script) config->auth_group_script = child->auth_group_script; else config->auth_group_script = parent->auth_group_script; if (child->user_authoritative != -1) config->user_authoritative = child->user_authoritative; else config->user_authoritative = parent->user_authoritative; if (child->group_authoritative != -1) config->group_authoritative = child->group_authoritative; else config->group_authoritative = parent->group_authoritative; if (!child->handler_scripts) config->handler_scripts = parent->handler_scripts; else if (!parent->handler_scripts) config->handler_scripts = child->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, child->handler_scripts, parent->handler_scripts); } return config; } typedef struct { apr_pool_t *pool; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int map_head_to_get; int ignore_activity; apr_array_header_t *trusted_proxy_headers; apr_array_header_t *trusted_proxies; int enable_sendfile; WSGIScriptFile *access_script; WSGIScriptFile *auth_user_script; WSGIScriptFile *auth_group_script; int user_authoritative; int group_authoritative; apr_hash_t *handler_scripts; const char *handler_script; int daemon_connects; int daemon_restarts; apr_time_t request_start; apr_time_t queue_start; apr_time_t daemon_start; } WSGIRequestConfig; static long wsgi_find_path_info(const char *uri, const char *path_info) { long lu = strlen(uri); long lp = strlen(path_info); while (lu-- && lp-- && uri[lu] == path_info[lp]) { if (path_info[lp] == '/') { while (lu && uri[lu-1] == '/') lu--; } } if (lu == -1) { lu = 0; } while (uri[lu] != '\0' && uri[lu] != '/') { lu++; } return lu; } static const char *wsgi_script_name(request_rec *r) { char *script_name = NULL; long path_info_start 
= 0; if (!r->path_info || !*r->path_info) { script_name = apr_pstrdup(r->pool, r->uri); } else { path_info_start = wsgi_find_path_info(r->uri, r->path_info); script_name = apr_pstrndup(r->pool, r->uri, path_info_start); } if (*script_name) { while (*script_name && (*(script_name+1) == '/')) script_name++; script_name = apr_pstrdup(r->pool, script_name); ap_no2slash((char*)script_name); } ap_str_tolower(script_name); return script_name; } static const char *wsgi_process_group(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; const char *h = NULL; apr_port_t p = 0; const char *n = NULL; if (!s) return ""; if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{GLOBAL}")) return ""; if (!strcmp(name, "{RESOURCE}")) { h = r->server->server_hostname; p = ap_get_server_port(r); n = wsgi_script_name(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u|%s", h, p, n); else return apr_psprintf(r->pool, "%s|%s", h, n); } if (!strcmp(name, "{SERVER}")) { h = r->server->server_hostname; p = ap_get_server_port(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (!strcmp(name, "{HOST}")) { h = r->hostname; p = ap_get_server_port(r); /* * The Host header could be empty or absent for HTTP/1.0 * or older. In that case fallback to ServerName. */ if (h == NULL || *h == 0) h = r->server->server_hostname; if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (strstr(name, "{ENV:") == name) { long len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) { if (*value == '%' && strstr(value, "%{ENV:") != value) return wsgi_process_group(r, value); return value; } } } } return s; } static const char *wsgi_server_group(request_rec *r, const char *s) { const char *name = NULL; const char *h = NULL; apr_port_t p = 0; if (!s) return ""; if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{GLOBAL}")) return ""; if (!strcmp(name, "{SERVER}")) { h = r->server->server_hostname; p = ap_get_server_port(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (!strcmp(name, "{HOST}")) { h = r->hostname; p = ap_get_server_port(r); /* * The Host header could be empty or absent for HTTP/1.0 * or older. In that case fallback to ServerName. 
*/ if (h == NULL || *h == 0) h = r->server->server_hostname; if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } } return s; } static const char *wsgi_application_group(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; const char *h = NULL; apr_port_t p = 0; const char *n = NULL; if (!s) { h = r->server->server_hostname; p = ap_get_server_port(r); n = wsgi_script_name(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u|%s", h, p, n); else return apr_psprintf(r->pool, "%s|%s", h, n); } if (*s != '%') return s; name = s + 1; if (*name) { if (!strcmp(name, "{GLOBAL}")) return ""; if (!strcmp(name, "{RESOURCE}")) { h = r->server->server_hostname; p = ap_get_server_port(r); n = wsgi_script_name(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u|%s", h, p, n); else return apr_psprintf(r->pool, "%s|%s", h, n); } if (!strcmp(name, "{SERVER}")) { h = r->server->server_hostname; p = ap_get_server_port(r); if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (!strcmp(name, "{HOST}")) { h = r->hostname; p = ap_get_server_port(r); /* * The Host header could be empty or absent for HTTP/1.0 * or older. In that case fallback to ServerName. */ if (h == NULL || *h == 0) h = r->server->server_hostname; if (p != DEFAULT_HTTP_PORT && p != DEFAULT_HTTPS_PORT) return apr_psprintf(r->pool, "%s:%u", h, p); else return h; } if (strstr(name, "{ENV:") == name) { long len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) { if (*value == '%' && strstr(value, "%{ENV:") != value) return wsgi_application_group(r, value); return value; } } } } return s; } static const char *wsgi_callable_object(request_rec *r, const char *s) { const char *name = NULL; const char *value = NULL; if (!s) return "application"; if (*s != '%') return s; name = s + 1; if (!*name) return "application"; if (strstr(name, "{ENV:") == name) { long len = 0; name = name + 5; len = strlen(name); if (len && name[len-1] == '}') { name = apr_pstrndup(r->pool, name, len-1); value = apr_table_get(r->notes, name); if (!value) value = apr_table_get(r->subprocess_env, name); if (!value) value = getenv(name); if (value) return value; } } return "application"; } static WSGIRequestConfig *wsgi_create_req_config(apr_pool_t *p, request_rec *r) { WSGIRequestConfig *config = NULL; WSGIServerConfig *sconfig = NULL; WSGIDirectoryConfig *dconfig = NULL; config = (WSGIRequestConfig *)apr_pcalloc(p, sizeof(WSGIRequestConfig)); dconfig = ap_get_module_config(r->per_dir_config, &wsgi_module); sconfig = ap_get_module_config(r->server->module_config, &wsgi_module); config->pool = p; config->restrict_process = dconfig->restrict_process; if (!config->restrict_process) config->restrict_process = sconfig->restrict_process; config->process_group = dconfig->process_group; if (!config->process_group) config->process_group = sconfig->process_group; config->process_group = wsgi_process_group(r, config->process_group); config->application_group = dconfig->application_group; if (!config->application_group) config->application_group = sconfig->application_group; config->application_group = wsgi_application_group(r, 
config->application_group); config->callable_object = dconfig->callable_object; if (!config->callable_object) config->callable_object = sconfig->callable_object; config->callable_object = wsgi_callable_object(r, config->callable_object); config->dispatch_script = dconfig->dispatch_script; if (!config->dispatch_script) config->dispatch_script = sconfig->dispatch_script; config->pass_apache_request = dconfig->pass_apache_request; if (config->pass_apache_request < 0) { config->pass_apache_request = sconfig->pass_apache_request; if (config->pass_apache_request < 0) config->pass_apache_request = 0; } config->pass_authorization = dconfig->pass_authorization; if (config->pass_authorization < 0) { config->pass_authorization = sconfig->pass_authorization; if (config->pass_authorization < 0) config->pass_authorization = 0; } config->script_reloading = dconfig->script_reloading; if (config->script_reloading < 0) { config->script_reloading = sconfig->script_reloading; if (config->script_reloading < 0) config->script_reloading = 1; } config->error_override = dconfig->error_override; if (config->error_override < 0) { config->error_override = sconfig->error_override; if (config->error_override < 0) config->error_override = 0; } config->chunked_request = dconfig->chunked_request; if (config->chunked_request < 0) { config->chunked_request = sconfig->chunked_request; if (config->chunked_request < 0) config->chunked_request = 0; } config->map_head_to_get = dconfig->map_head_to_get; if (config->map_head_to_get < 0) { config->map_head_to_get = sconfig->map_head_to_get; if (config->map_head_to_get < 0) config->map_head_to_get = 2; } config->ignore_activity = dconfig->ignore_activity; if (config->ignore_activity < 0) { config->ignore_activity = sconfig->ignore_activity; if (config->ignore_activity < 0) config->ignore_activity = 0; } config->trusted_proxy_headers = dconfig->trusted_proxy_headers; if (!config->trusted_proxy_headers) config->trusted_proxy_headers = sconfig->trusted_proxy_headers; config->trusted_proxies = dconfig->trusted_proxies; if (!config->trusted_proxies) config->trusted_proxies = sconfig->trusted_proxies; config->enable_sendfile = dconfig->enable_sendfile; if (config->enable_sendfile < 0) { config->enable_sendfile = sconfig->enable_sendfile; if (config->enable_sendfile < 0) config->enable_sendfile = 0; } config->access_script = dconfig->access_script; config->auth_user_script = dconfig->auth_user_script; config->auth_group_script = dconfig->auth_group_script; config->user_authoritative = dconfig->user_authoritative; if (config->user_authoritative == -1) config->user_authoritative = 1; config->group_authoritative = dconfig->group_authoritative; if (config->group_authoritative == -1) config->group_authoritative = 1; if (!dconfig->handler_scripts) config->handler_scripts = sconfig->handler_scripts; else if (!sconfig->handler_scripts) config->handler_scripts = dconfig->handler_scripts; else { config->handler_scripts = apr_hash_overlay(p, dconfig->handler_scripts, sconfig->handler_scripts); } config->handler_script = ""; config->daemon_connects = 0; config->daemon_restarts = 0; config->request_start = 0; config->queue_start = 0; config->daemon_start = 0; return config; } /* Error reporting. */ static void wsgi_log_script_error(request_rec *r, const char *e, const char *n) { char *message = NULL; if (!n) n = r->filename; message = apr_psprintf(r->pool, "%s: %s", e, n); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "%s", message); } /* Class objects used by response handler. 
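 *
 * The InputObject defined below backs the request body stream exposed to
 * the application as environ['wsgi.input'] (read(), readline(), readlines()
 * and iteration), while the AdapterObject further down drives the WSGI
 * application, supplying start_response() and write(). A rough usage
 * sketch from the application side (illustrative only, following the
 * normal WSGI calling convention):
 *
 *   def application(environ, start_response):
 *       body = environ['wsgi.input'].read()
 *       start_response('200 OK', [('Content-Type', 'text/plain')])
 *       return [body]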
*/ static PyTypeObject Dispatch_Type; typedef struct { PyObject_HEAD request_rec *r; int init; int done; char *buffer; apr_off_t size; apr_off_t offset; apr_off_t length; apr_bucket_brigade *bb; int seen_eos; int seen_error; apr_off_t bytes; apr_off_t reads; apr_time_t time; int ignore_activity; } InputObject; static PyTypeObject Input_Type; static InputObject *newInputObject(request_rec *r, int ignore_activity) { InputObject *self; self = PyObject_New(InputObject, &Input_Type); if (self == NULL) return NULL; self->r = r; self->init = 0; self->done = 0; self->buffer = NULL; self->size = 0; self->offset = 0; self->length = 0; self->bb = NULL; self->seen_eos = 0; self->seen_error = 0; self->bytes = 0; self->reads = 0; self->time = 0; self->ignore_activity = ignore_activity; return self; } static void Input_dealloc(InputObject *self) { if (self->buffer) free(self->buffer); PyObject_Del(self); } static void Input_finish(InputObject *self) { if (self->bb) { Py_BEGIN_ALLOW_THREADS apr_brigade_destroy(self->bb); Py_END_ALLOW_THREADS self->bb = NULL; } self->r = NULL; } static PyObject *Input_close(InputObject *self, PyObject *args) { if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } Py_INCREF(Py_None); return Py_None; } static apr_status_t wsgi_strtoff(apr_off_t *offset, const char *nptr, char **endptr, int base) { errno = 0; if (sizeof(apr_off_t) == 4) { *offset = strtol(nptr, endptr, base); } else { *offset = apr_strtoi64(nptr, endptr, base); } return APR_FROM_OS_ERROR(errno); } static apr_int64_t Input_read_from_input(InputObject *self, char *buffer, apr_size_t bufsiz) { request_rec *r = self->r; apr_bucket_brigade *bb = self->bb; apr_status_t rv; apr_status_t error_status = 0; const char *error_message = NULL; apr_time_t start = 0; apr_time_t finish = 0; /* If have already seen end of input, return an empty string. */ if (self->seen_eos) return 0; /* If have already encountered an error, then raise a new error. */ if (self->seen_error) { PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi request data read " "error: Input is already in error state."); return -1; } /* * When reaading the request content we will be saying that we * should block if there is no input data available at that * point but not all data has been exhausted. We therefore need * to ensure that we do not cause Python as a whole to block by * releasing the GIL, but also must remember to reacquire the GIL * when we exit. */ Py_BEGIN_ALLOW_THREADS start = apr_time_now(); self->reads += 1; /* * Create the bucket brigade the first time it is required and * save it against the input object. We need to make sure we * perform a cleanup, but not destroy, the bucket brigade each * time we exit this function. */ if (!bb) { bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); if (bb == NULL) { r->connection->keepalive = AP_CONN_CLOSE; error_message = "Unable to create bucket brigade"; goto finally; } self->bb = bb; } /* Force the required amount of input to be read. */ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES, APR_BLOCK_READ, bufsiz); if (rv != APR_SUCCESS) { /* * If we actually fail here, we want to just return and * stop trying to read data from the client. The HTTP_IN * input filter is a bit of a pain here as it can return * EAGAIN in various strange situations where it isn't * believed that it means to retry, but that it is still * a permanent failure. This can include timeouts and * errors in chunked encoding format. 
To avoid a message * of 'Resource temporarily unavailable' which could be * confusing, replace it with a generic message that the * connection was terminated. */ r->connection->keepalive = AP_CONN_CLOSE; if (APR_STATUS_IS_EAGAIN(rv)) error_message = "Connection was terminated"; else error_status = rv; goto finally; } /* * If this fails, it means that a filter is written incorrectly and * that it needs to learn how to properly handle APR_BLOCK_READ * requests by returning data when requested. */ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb)); /* * Check to see if EOS terminates the brigade. If so, we remember * this to avoid any attempts to read more data in future calls. */ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) self->seen_eos = 1; /* Now extract the actual data from the bucket brigade. */ rv = apr_brigade_flatten(bb, buffer, &bufsiz); if (rv != APR_SUCCESS) { error_status = rv; goto finally; } finally: /* * We must always cleanup up, not destroy, the brigade after * each call. */ if (bb) apr_brigade_cleanup(bb); finish = apr_time_now(); if (finish > start) self->time += (finish - start); /* Make sure we reacquire the GIL when all done. */ Py_END_ALLOW_THREADS /* * Set any Python exception when an error has occurred and * remember there was an error so can flag on subsequent * reads that already in an error state. */ if (error_status) { char status_buffer[512]; error_message = apr_psprintf(r->pool, "Apache/mod_wsgi request " "data read error: %s.", apr_strerror(error_status, status_buffer, sizeof(status_buffer)-1)); PyErr_SetString(PyExc_IOError, error_message); self->seen_error = 1; return -1; } else if (error_message) { error_message = apr_psprintf(r->pool, "Apache/mod_wsgi request " "data read error: %s.", error_message); PyErr_SetString(PyExc_IOError, error_message); self->seen_error = 1; return -1; } /* * Finally return the amount of data that was read. This will be * zero if all data has been consumed. */ return bufsiz; } static PyObject *Input_read(InputObject *self, PyObject *args) { #if defined(HAVE_LONG_LONG) PY_LONG_LONG size = -1; #else long size = -1; #endif PyObject *result = NULL; char *buffer = NULL; apr_off_t length = 0; int init = 0; apr_int64_t n; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } #if defined(HAVE_LONG_LONG) if (!PyArg_ParseTuple(args, "|L:read", &size)) return NULL; #else if (!PyArg_ParseTuple(args, "|l:read", &size)) return NULL; #endif #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_idle_timeout && !self->ignore_activity) { apr_thread_mutex_lock(wsgi_monitor_lock); if (wsgi_idle_timeout) { wsgi_idle_shutdown_time = apr_time_now(); wsgi_idle_shutdown_time += wsgi_idle_timeout; } apr_thread_mutex_unlock(wsgi_monitor_lock); } #endif if (self->seen_error) { PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi request data read " "error: Input is already in error state."); return NULL; } init = self->init; if (!self->init) self->init = 1; /* No point continuing if no more data to be consumed. */ if (self->done && self->length == 0) return PyString_FromString(""); /* * If requested size is zero bytes, then still need to pass * this through to Apache input filters so that any * 100-continue response is triggered. Only do this if very * first attempt to read data. Note that this will cause an * assertion failure in HTTP_IN input filter when Apache * maintainer mode is enabled. 
It is arguable that the * assertion check, which prohibits a zero length read, * shouldn't exist, as why should a zero length read be not * allowed if input filter processing still works when it * does occur. */ if (size == 0) { if (!init) { char dummy[1]; n = Input_read_from_input(self, dummy, 0); if (n == -1) return NULL; } return PyString_FromString(""); } /* * First deal with case where size has been specified. After * that deal with case where expected that all remaining * data is to be read in and returned as one string. */ if (size > 0) { /* Allocate string of the exact size required. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { if (size >= self->length) { length = self->length; memcpy(buffer, self->buffer + self->offset, length); self->offset = 0; self->length = 0; } else { length = size; memcpy(buffer, self->buffer + self->offset, length); self->offset += length; self->length -= length; } } /* If all data residual buffer consumed then free it. */ if (!self->length) { free(self->buffer); self->buffer = NULL; } /* Read in remaining data required to achieve size. */ if (length < size) { while (length != size) { n = Input_read_from_input(self, buffer+length, size-length); if (n == -1) { Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; break; } length += n; } /* * Resize the final string. If the size reduction is * by more than 25% of the string size, then Python * will allocate a new block of memory and copy the * data into it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } } else { /* * Here we are going to try and read in all the * remaining data. First we have to allocate a suitably * large string, but we can't fully trust the amount * that the request structure says is remaining based on * the original content length though, as an input * filter can insert/remove data from the input stream * thereby invalidating the original content length. * What we do is allow for an extra 25% above what we * have already buffered and what the request structure * says is remaining. A value of 25% has been chosen so * as to match best how Python handles resizing of * strings. Note that even though we do this and allow * all available content, strictly speaking the WSGI * specification says we should only read up until content * length. This though is because the WSGI specification * is deficient in dealing with the concept of mutating * input filters. Since read() with no argument is also * not allowed by WSGI specification implement it in the * way which is most logical and ensure that input data * is not truncated. */ if (self->buffer) { size = self->length; size = size + (size >> 2); if (size < HUGE_STRING_LEN) size = HUGE_STRING_LEN; } else size = HUGE_STRING_LEN; /* Allocate string of the estimated size. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* * Copy any residual data from use of readline(). The * residual should always be less in size than the * string we have allocated to hold it, so can consume * all of it. 
*/ if (self->buffer && self->length) { length = self->length; memcpy(buffer, self->buffer + self->offset, length); self->offset = 0; self->length = 0; free(self->buffer); self->buffer = NULL; } /* Now make first attempt at reading remaining data. */ n = Input_read_from_input(self, buffer+length, size-length); if (n == -1) { Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } length += n; /* * Don't just assume that all data has been read if * amount read was less than that requested. Still must * perform a read which returns that no more data found. */ while (!self->done) { if (length == size) { /* Increase the size of the string by 25%. */ size = size + (size >> 2); if (_PyString_Resize(&result, size)) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); } /* Now make succesive attempt at reading data. */ n = Input_read_from_input(self, buffer+length, size-length); if (n == -1) { Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } length += n; } /* * Resize the final string. If the size reduction is by * more than 25% of the string size, then Python will * allocate a new block of memory and copy the data into * it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } self->bytes += length; return result; } static PyObject *Input_readline(InputObject *self, PyObject *args) { #if defined(HAVE_LONG_LONG) PY_LONG_LONG size = -1; #else long size = -1; #endif PyObject *result = NULL; char *buffer = NULL; apr_off_t length = 0; apr_int64_t n; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } #if defined(HAVE_LONG_LONG) if (!PyArg_ParseTuple(args, "|L:readline", &size)) return NULL; #else if (!PyArg_ParseTuple(args, "|l:readline", &size)) return NULL; #endif if (self->seen_error) { PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi request data read " "error: Input is already in error state."); return NULL; } if (!self->init) self->init = 1; /* * No point continuing if requested size is zero or if no * more data to read and no buffered data. */ if ((self->done && self->length == 0) || size == 0) return PyString_FromString(""); /* * First deal with case where size has been specified. After * that deal with case where expected that a complete line * is returned regardless of the size. */ if (size > 0) { /* Allocate string of the exact size required. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { char *p = NULL; const char *q = NULL; p = buffer; q = self->buffer + self->offset; while (self->length && length < size) { self->offset++; self->length--; length++; if ((*p++ = *q++) == '\n') break; } /* If all data in residual buffer consumed then free it. */ if (!self->length) { free(self->buffer); self->buffer = NULL; } } /* * Read in remaining data required to achieve size. Note * that can't just return whatever the first read might * have returned if no EOL encountered as must return * exactly the required size if no EOL unless that would * have exhausted all input. 
*/ while ((!length || buffer[length-1] != '\n') && !self->done && length < size) { char *p = NULL; char *q = NULL; n = Input_read_from_input(self, buffer+length, size-length); if (n == -1) { Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } else { /* * Search for embedded EOL in what was read and if * found copy any residual into a buffer for use * next time the read functions are called. */ p = buffer + length; q = p + n; while (p != q) { length++; if (*p++ == '\n') break; } if (p != q) { self->size = q - p; self->buffer = (char *)malloc(self->size); self->offset = 0; self->length = self->size; memcpy(self->buffer, p, self->size); } } } /* * Resize the final string. If the size reduction is * by more than 25% of the string size, then Python * will allocate a new block of memory and copy the * data into it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } else { /* * Here we have to read in a line but where we have no * idea how long it may be. What we can do first is if * we have any residual data from a previous read * operation, see if it contains an EOL. This means we * have to do a search, but this is likely going to be * better than having to resize and copy memory later on. */ if (self->buffer && self->length) { const char *p = NULL; const char *q = NULL; p = self->buffer + self->offset; q = memchr(p, '\n', self->length); if (q) size = q - p; } /* * If residual data buffer didn't contain an EOL, all we * can do is allocate a reasonably sized string and if * that isn't big enough keep increasing it in size. For * this we will start out with a buffer 25% greater in * size than what is stored in the residual data buffer * or one the same size as Apache string size, whichever * is greater. */ if (self->buffer && size < 0) { size = self->length; size = size + (size >> 2); } if (size < HUGE_STRING_LEN) size = HUGE_STRING_LEN; /* Allocate string of the initial size. */ result = PyString_FromStringAndSize(NULL, size); if (!result) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); /* Copy any residual data from use of readline(). */ if (self->buffer && self->length) { char *p = NULL; const char *q = NULL; p = buffer; q = self->buffer + self->offset; while (self->length && length < size) { self->offset++; self->length--; length++; if ((*p++ = *q++) == '\n') break; } /* If all data in residual buffer consumed then free it. */ if (!self->length) { free(self->buffer); self->buffer = NULL; } } /* * Read in remaining data until find an EOL, or until all * data has been consumed. */ while ((!length || buffer[length-1] != '\n') && !self->done) { char *p = NULL; char *q = NULL; n = Input_read_from_input(self, buffer+length, size-length); if (n == -1) { Py_DECREF(result); return NULL; } else if (n == 0) { /* Have exhausted all the available input data. */ self->done = 1; } else { /* * Search for embedded EOL in what was read and if * found copy any residual into a buffer for use * next time the read functions are called. */ p = buffer + length; q = p + n; while (p != q) { length++; if (*p++ == '\n') break; } if (p != q) { self->size = q - p; self->buffer = (char *)malloc(self->size); self->offset = 0; self->length = self->size; memcpy(self->buffer, p, self->size); } if (buffer[length-1] != '\n' && length == size) { /* Increase size of string and keep going. 
*/ size = size + (size >> 2); if (_PyString_Resize(&result, size)) return NULL; buffer = PyString_AS_STRING((PyStringObject *)result); } } } /* * Resize the final string. If the size reduction is by * more than 25% of the string size, then Python will * allocate a new block of memory and copy the data into * it. */ if (length != size) { if (_PyString_Resize(&result, length)) return NULL; } } self->bytes += length; return result; } static PyObject *Input_readlines(InputObject *self, PyObject *args) { long hint = 0; long length = 0; PyObject *result = NULL; PyObject *line = NULL; PyObject *rlargs = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "|l:readlines", &hint)) return NULL; result = PyList_New(0); if (!result) return NULL; rlargs = PyTuple_New(0); if (!rlargs) { Py_DECREF(result); return NULL; } while (1) { long n; if (!(line = Input_readline(self, rlargs))) { Py_DECREF(result); result = NULL; break; } if ((n = PyString_Size(line)) == 0) { Py_DECREF(line); break; } if (PyList_Append(result, line) == -1) { Py_DECREF(line); Py_DECREF(result); result = NULL; break; } Py_DECREF(line); length += n; if (hint > 0 && length >= hint) break; } Py_DECREF(rlargs); return result; } static PyMethodDef Input_methods[] = { { "close", (PyCFunction)Input_close, METH_NOARGS, 0 }, { "read", (PyCFunction)Input_read, METH_VARARGS, 0 }, { "readline", (PyCFunction)Input_readline, METH_VARARGS, 0 }, { "readlines", (PyCFunction)Input_readlines, METH_VARARGS, 0 }, { NULL, NULL} }; static PyObject *Input_iter(InputObject *self) { if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } Py_INCREF(self); return (PyObject *)self; } static PyObject *Input_iternext(InputObject *self) { PyObject *line = NULL; PyObject *rlargs = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } rlargs = PyTuple_New(0); if (!rlargs) return NULL; line = Input_readline(self, rlargs); Py_DECREF(rlargs); if (!line) return NULL; if (PyString_GET_SIZE(line) == 0) { PyErr_SetObject(PyExc_StopIteration, Py_None); Py_DECREF(line); return NULL; } return line; } static PyTypeObject Input_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Input", /*tp_name*/ sizeof(InputObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Input_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ #if defined(Py_TPFLAGS_HAVE_ITER) Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ #else Py_TPFLAGS_DEFAULT, /*tp_flags*/ #endif 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ (getiterfunc)Input_iter, /*tp_iter*/ (iternextfunc)Input_iternext, /*tp_iternext*/ Input_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; typedef struct { PyObject_HEAD int result; request_rec *r; apr_bucket_brigade *bb; WSGIRequestConfig *config; InputObject *input; PyObject *log_buffer; PyObject *log; int status; const char *status_line; PyObject *headers; PyObject *sequence; int content_length_set; apr_off_t content_length; apr_off_t output_length; 
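    /*
     * Accounting fields, as used in Adapter_output() below: output_length
     * tracks how much response data the application has written,
     * output_writes counts the separate blocks of output, and output_time
     * accumulates time spent passing data down the output filter chain.
     * The content_length/content_length_set pair above mirror any
     * Content-Length response header so that no more than that amount of
     * data is ever sent.
     */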
apr_off_t output_writes; apr_time_t output_time; apr_time_t start_time; } AdapterObject; static PyTypeObject Adapter_Type; static AdapterObject *newAdapterObject(request_rec *r) { AdapterObject *self; self = PyObject_New(AdapterObject, &Adapter_Type); if (self == NULL) return NULL; self->result = HTTP_INTERNAL_SERVER_ERROR; self->r = r; self->bb = NULL; self->config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); self->status = HTTP_INTERNAL_SERVER_ERROR; self->status_line = NULL; self->headers = NULL; self->sequence = NULL; self->content_length_set = 0; self->content_length = 0; self->output_length = 0; self->output_writes = 0; self->output_time = 0; self->input = newInputObject(r, self->config->ignore_activity); self->log_buffer = newLogBufferObject(r, APLOG_ERR, "", 0); self->log = newLogWrapperObject(self->log_buffer); return self; } static void Adapter_dealloc(AdapterObject *self) { Py_XDECREF(self->headers); Py_XDECREF(self->sequence); Py_DECREF(self->input); Py_DECREF(self->log_buffer); Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Adapter_start_response(AdapterObject *self, PyObject *args) { PyObject *result = NULL; PyObject *status_line = NULL; PyObject *headers = NULL; PyObject *exc_info = Py_None; PyObject *status_line_as_bytes = NULL; PyObject *headers_as_bytes = NULL; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "OO!|O:start_response", &status_line, &PyList_Type, &headers, &exc_info)) { return NULL; } if (exc_info != Py_None && !PyTuple_Check(exc_info)) { PyErr_SetString(PyExc_RuntimeError, "exception info must be a tuple"); return NULL; } if (exc_info != Py_None) { if (self->status_line && !self->headers) { PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; if (!PyArg_ParseTuple(exc_info, "OOO", &type, &value, &traceback)) { return NULL; } Py_INCREF(type); Py_INCREF(value); Py_INCREF(traceback); PyErr_Restore(type, value, traceback); return NULL; } } else if (self->status_line && !self->headers) { PyErr_SetString(PyExc_RuntimeError, "headers have already been sent"); return NULL; } /* Publish event for the start of the response. 
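 * The 'response_started' event dictionary built below carries the request
 * log id (when available), the response status line and headers, any
 * exception info passed to start_response(), and the per-thread
 * request_data object, and is delivered to registered subscribers via
 * wsgi_publish_event().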
*/ if (wsgi_event_subscribers()) { WSGIThreadInfo *thread_info; PyObject *event = NULL; PyObject *value = NULL; thread_info = wsgi_thread_info(0, 0); event = PyDict_New(); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) if (self->r->log_id) { #if PY_MAJOR_VERSION >= 3 value = PyUnicode_DecodeLatin1(self->r->log_id, strlen(self->r->log_id), NULL); #else value = PyString_FromString(self->r->log_id); #endif PyDict_SetItemString(event, "request_id", value); Py_DECREF(value); } #endif PyDict_SetItemString(event, "response_status", status_line); PyDict_SetItemString(event, "response_headers", headers); PyDict_SetItemString(event, "exception_info", exc_info); PyDict_SetItemString(event, "request_data", thread_info->request_data); wsgi_publish_event("response_started", event); Py_DECREF(event); } status_line_as_bytes = wsgi_convert_status_line_to_bytes(status_line); if (!status_line_as_bytes) goto finally; headers_as_bytes = wsgi_convert_headers_to_bytes(headers); if (!headers_as_bytes) goto finally; self->status_line = apr_pstrdup(self->r->pool, PyString_AsString( status_line_as_bytes)); self->status = (int)strtol(self->status_line, NULL, 10); Py_XDECREF(self->headers); self->headers = headers_as_bytes; Py_INCREF(headers_as_bytes); result = PyObject_GetAttrString((PyObject *)self, "write"); finally: Py_XDECREF(status_line_as_bytes); Py_XDECREF(headers_as_bytes); return result; } static int Adapter_output(AdapterObject *self, const char *data, apr_off_t length, PyObject *string_object, int exception_when_aborted) { int i = 0; apr_status_t rv; request_rec *r; apr_time_t output_start = 0; apr_time_t output_finish = 0; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_idle_timeout && !self->config->ignore_activity) { apr_thread_mutex_lock(wsgi_monitor_lock); if (wsgi_idle_timeout) { wsgi_idle_shutdown_time = apr_time_now(); wsgi_idle_shutdown_time += wsgi_idle_timeout; } apr_thread_mutex_unlock(wsgi_monitor_lock); } #endif if (!self->status_line) { PyErr_SetString(PyExc_RuntimeError, "response has not been started"); return 0; } r = self->r; /* Remember we started sending this block of output. */ output_start = apr_time_now(); /* Count how many separate blocks have been output. */ if (string_object) self->output_writes++; /* Have response headers yet been sent. */ if (self->headers) { /* * Apache prior to Apache 2.2.8 has a bug in it * whereby it doesn't force '100 Continue' * response before responding with headers if no * read. So, force a zero length read before * sending the headers if haven't yet attempted * to read anything. This will ensure that if no * request content has been read that any '100 * Continue' response will be flushed and sent * back to the client if client was expecting * one. Only want to do this for 2xx and 3xx * status values. Note that even though Apple * supplied version of Apache on MacOS X Leopard * is newer than version 2.2.8, the header file * has never been patched when they make updates * and so anything compiled against it thinks it * is older. */ #if (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER < 2) || \ (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER == 2 && \ AP_SERVER_PATCHLEVEL_NUMBER < 8) if (!self->input->init) { if (self->status >= 200 && self->status < 400) { PyObject *args = NULL; PyObject *result = NULL; args = Py_BuildValue("(i)", 0); result = Input_read(self->input, args); if (PyErr_Occurred()) PyErr_Clear(); Py_DECREF(args); Py_XDECREF(result); } } #endif /* * Now setup the response headers in request object. 
We * have already converted any native strings in the * headers to byte strings and validated the format of * the header names and values so can skip all the error * checking. */ r->status = self->status; r->status_line = self->status_line; for (i = 0; i < PyList_Size(self->headers); i++) { PyObject *tuple = NULL; PyObject *object1 = NULL; PyObject *object2 = NULL; char *name = NULL; char *value = NULL; tuple = PyList_GetItem(self->headers, i); object1 = PyTuple_GetItem(tuple, 0); object2 = PyTuple_GetItem(tuple, 1); name = PyBytes_AsString(object1); value = PyBytes_AsString(object2); if (!strcasecmp(name, "Content-Type")) { /* * In a daemon child process we cannot call the * function ap_set_content_type() as want to * avoid adding any output filters based on the * type of file being served as this will be * done in the main Apache child process which * proxied the request to the daemon process. */ if (*self->config->process_group) r->content_type = apr_pstrdup(r->pool, value); else ap_set_content_type(r, apr_pstrdup(r->pool, value)); } else if (!strcasecmp(name, "Content-Length")) { char *endstr; apr_off_t length; if (wsgi_strtoff(&length, value, &endstr, 10) || *endstr || length < 0) { PyErr_SetString(PyExc_ValueError, "invalid content length"); output_finish = apr_time_now(); if (output_finish > output_start) self->output_time += (output_finish - output_start); return 0; } ap_set_content_length(r, length); self->content_length_set = 1; self->content_length = length; } else if (!strcasecmp(name, "WWW-Authenticate")) { apr_table_add(r->err_headers_out, name, value); } else { apr_table_add(r->headers_out, name, value); } } /* * Reset flag indicating whether '100 Continue' response * expected. If we don't do this then if an attempt to read * input for the first time is after headers have been * sent, then Apache is wrongly generate the '100 Continue' * response into the response content. Not sure if this is * a bug in Apache, or that it truly believes that input * will never be read after the response headers have been * sent. */ r->expecting_100 = 0; /* No longer need headers now that they have been sent. */ Py_DECREF(self->headers); self->headers = NULL; } /* * If content length was specified, ensure that we don't * actually output more data than was specified as being * sent as otherwise technically in violation of HTTP RFC. */ if (length) { apr_off_t output_length = length; if (self->content_length_set) { if (self->output_length < self->content_length) { if (self->output_length + length > self->content_length) { length = self->content_length - self->output_length; } } else length = 0; } self->output_length += output_length; } /* Now output any data. */ if (length) { apr_bucket *b; /* * When using Apache 2.X can use lower level * bucket brigade APIs. This is preferred as * ap_rwrite()/ap_rflush() will grow memory in * the request pool on each call, which will * result in an increase in memory use over time * when streaming of data is being performed. * The memory is still reclaimed, but only at * the end of the request. Using bucket brigade * API avoids this, and also avoids any copying * of response data due to buffering performed * by ap_rwrite(). 
*/ if (r->connection->aborted) { if (!exception_when_aborted) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r, "mod_wsgi (pid=%d): Client closed connection.", getpid()); } else PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi client " "connection closed."); output_finish = apr_time_now(); if (output_finish > output_start) self->output_time += (output_finish - output_start); return 0; } if (!self->bb) { self->bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); } #if 0 if (string_object) { b = wsgi_apr_bucket_python_create(data, length, self->config->application_group, string_object, r->connection->bucket_alloc); } else { #endif b = apr_bucket_transient_create(data, (apr_size_t)length, r->connection->bucket_alloc); #if 0 } #endif APR_BRIGADE_INSERT_TAIL(self->bb, b); b = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(self->bb, b); Py_BEGIN_ALLOW_THREADS rv = ap_pass_brigade(r->output_filters, self->bb); Py_END_ALLOW_THREADS if (rv != APR_SUCCESS) { char status_buffer[512]; const char *error_message; if (!exception_when_aborted) { error_message = apr_psprintf(r->pool, "Failed to write " "response data: %s", apr_strerror(rv, status_buffer, sizeof(status_buffer)-1)); ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r, "mod_wsgi (pid=%d): %s.", getpid(), error_message); } else { error_message = apr_psprintf(r->pool, "Apache/mod_wsgi " "failed to write response data: %s", apr_strerror(rv, status_buffer, sizeof(status_buffer)-1)); PyErr_SetString(PyExc_IOError, error_message); } output_finish = apr_time_now(); if (output_finish > output_start) self->output_time += (output_finish - output_start); return 0; } Py_BEGIN_ALLOW_THREADS apr_brigade_cleanup(self->bb); Py_END_ALLOW_THREADS } /* Add how much time we spent send this block of output. */ output_finish = apr_time_now(); if (output_finish > output_start) self->output_time += (output_finish - output_start); /* * Check whether aborted connection was found when data * being written, otherwise will not be flagged until next * time that data is being written. Early detection is * better as it may have been the last data block being * written and application may think that data has all * been written. In a streaming application, we also want * to avoid any additional data processing to generate any * successive data. */ if (r->connection->aborted) { if (!exception_when_aborted) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r, "mod_wsgi (pid=%d): Client closed connection.", getpid()); } else PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi client " "connection closed."); return 0; } return 1; } /* Split buckets at 1GB when sending large files. */ #define MAX_BUCKET_SIZE (0x40000000) static int Adapter_output_file(AdapterObject *self, apr_file_t* tmpfile, apr_off_t offset, apr_off_t len) { request_rec *r; apr_bucket *b; apr_status_t rv; apr_bucket_brigade *bb; apr_file_t* dupfile = NULL; r = self->r; if (r->connection->aborted) { PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi client " "connection closed."); return 0; } if (len == 0) return 1; bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); apr_file_dup(&dupfile, tmpfile, r->pool); if (sizeof(apr_off_t) == sizeof(apr_size_t) || len < MAX_BUCKET_SIZE) { /* Can use a single bucket to send file. 
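 */

/*
 * Illustrative arithmetic only (hypothetical sizes): when apr_size_t is
 * narrower than apr_off_t, a large file has to be split into buckets no
 * bigger than MAX_BUCKET_SIZE, which is what the else branch below does.
 */
#if 0
apr_off_t example_len = (apr_off_t)5 * 1024 * 1024 * 1024 / 2;  /* 2.5 GiB */
int example_buckets = 1;

while (example_len > MAX_BUCKET_SIZE) {     /* 1 GiB per bucket */
    example_len -= MAX_BUCKET_SIZE;
    example_buckets++;
}
/* example_buckets == 3: two full 1 GiB buckets plus a final 0.5 GiB one. */
#endif

/*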
 */

#if 0
        b = apr_bucket_file_create(tmpfile, offset, (apr_size_t)len,
                                   r->pool, r->connection->bucket_alloc);
#endif
        b = apr_bucket_file_create(dupfile, offset, (apr_size_t)len,
                                   r->pool, r->connection->bucket_alloc);
    }
    else {
        /* Need to create multiple buckets to send file. */

#if 0
        b = apr_bucket_file_create(tmpfile, offset, MAX_BUCKET_SIZE,
                                   r->pool, r->connection->bucket_alloc);
#endif
        b = apr_bucket_file_create(dupfile, offset, MAX_BUCKET_SIZE,
                                   r->pool, r->connection->bucket_alloc);

        while (len > MAX_BUCKET_SIZE) {
            apr_bucket *cb;
            apr_bucket_copy(b, &cb);
            APR_BRIGADE_INSERT_TAIL(bb, cb);
            b->start += MAX_BUCKET_SIZE;
            len -= MAX_BUCKET_SIZE;
        }

        /* Resize just the last bucket */
        b->length = (apr_size_t)len;
    }

    APR_BRIGADE_INSERT_TAIL(bb, b);

    b = apr_bucket_flush_create(r->connection->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    b = apr_bucket_eos_create(r->connection->bucket_alloc);
    APR_BRIGADE_INSERT_TAIL(bb, b);

    Py_BEGIN_ALLOW_THREADS
    rv = ap_pass_brigade(r->output_filters, bb);
    Py_END_ALLOW_THREADS

    if (rv != APR_SUCCESS) {
        char status_buffer[512];
        const char *error_message;

        error_message = apr_psprintf(r->pool, "Apache/mod_wsgi failed "
                "to write response data: %s.", apr_strerror(rv,
                status_buffer, sizeof(status_buffer)-1));

        PyErr_SetString(PyExc_IOError, error_message);

        return 0;
    }

    Py_BEGIN_ALLOW_THREADS
    apr_brigade_destroy(bb);
    Py_END_ALLOW_THREADS

    if (r->connection->aborted) {
        PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi client connection "
                        "closed.");
        return 0;
    }

    return 1;
}

static APR_OPTIONAL_FN_TYPE(ssl_is_https) *wsgi_is_https = NULL;

static PyObject *Adapter_environ(AdapterObject *self)
{
    request_rec *r = NULL;

    PyObject *vars = NULL;
    PyObject *object = NULL;

    const apr_array_header_t *head = NULL;
    const apr_table_entry_t *elts = NULL;

    int i = 0;

    const char *scheme = NULL;

    /* Create the WSGI environment dictionary. */

    vars = PyDict_New();

    /* Merge the CGI environment into the WSGI environment. */

    r = self->r;

    head = apr_table_elts(r->subprocess_env);
    elts = (apr_table_entry_t *)head->elts;

    for (i = 0; i < head->nelts; ++i) {
        if (elts[i].key) {
            if (elts[i].val) {
#if PY_MAJOR_VERSION >= 3
                if (!strcmp(elts[i].key, "DOCUMENT_ROOT")) {
                    object = PyUnicode_Decode(elts[i].val,
                                              strlen(elts[i].val),
                                              Py_FileSystemDefaultEncoding,
                                              "surrogateescape");
                }
                else if (!strcmp(elts[i].key, "SCRIPT_FILENAME")) {
                    object = PyUnicode_Decode(elts[i].val,
                                              strlen(elts[i].val),
                                              Py_FileSystemDefaultEncoding,
                                              "surrogateescape");
                }
                else {
                    object = PyUnicode_DecodeLatin1(elts[i].val,
                                                    strlen(elts[i].val),
                                                    NULL);
                }
#else
                object = PyString_FromString(elts[i].val);
#endif
                PyDict_SetItemString(vars, elts[i].key, object);
                Py_DECREF(object);
            }
            else
                PyDict_SetItemString(vars, elts[i].key, Py_None);
        }
    }

    PyDict_DelItemString(vars, "PATH");

    /*
     * Now setup all the WSGI specific environment values.
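 */

/*
 * A sketch of the decoding rule used in the loop above for Python 3
 * (the variable values here are hypothetical): filesystem paths keep
 * their raw bytes via "surrogateescape", every other CGI variable is
 * decoded as latin-1.
 */
#if 0
PyObject *path_value = PyUnicode_Decode("/srv/www/htdocs",
                                        strlen("/srv/www/htdocs"),
                                        Py_FileSystemDefaultEncoding,
                                        "surrogateescape");
PyObject *text_value = PyUnicode_DecodeLatin1("text/html",
                                              strlen("text/html"), NULL);

Py_XDECREF(path_value);
Py_XDECREF(text_value);
#endif

/*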
*/ object = Py_BuildValue("(ii)", 1, 0); PyDict_SetItemString(vars, "wsgi.version", object); Py_DECREF(object); object = PyBool_FromLong(wsgi_multithread); PyDict_SetItemString(vars, "wsgi.multithread", object); Py_DECREF(object); object = PyBool_FromLong(wsgi_multiprocess); PyDict_SetItemString(vars, "wsgi.multiprocess", object); Py_DECREF(object); #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { if (wsgi_daemon_process->group->threads == 1 && wsgi_daemon_process->group->maximum_requests == 1) { PyDict_SetItemString(vars, "wsgi.run_once", Py_True); } else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); } else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); #else PyDict_SetItemString(vars, "wsgi.run_once", Py_False); #endif scheme = apr_table_get(r->subprocess_env, "HTTPS"); if (scheme && (!strcasecmp(scheme, "On") || !strcmp(scheme, "1"))) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString("https"); #else object = PyString_FromString("https"); #endif PyDict_SetItemString(vars, "wsgi.url_scheme", object); Py_DECREF(object); } else { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString("http"); #else object = PyString_FromString("http"); #endif PyDict_SetItemString(vars, "wsgi.url_scheme", object); Py_DECREF(object); } /* * We remove the HTTPS variable because WSGI compliant * applications shouldn't rely on it. Instead they should * use wsgi.url_scheme. We do this even if SetEnv was * used to set HTTPS from Apache configuration. That is * we convert it into the correct variable and remove the * original. */ if (scheme) PyDict_DelItemString(vars, "HTTPS"); /* * Setup log object for WSGI errors. Don't decrement * reference to log object as keep reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* Setup input object for request content. */ object = (PyObject *)self->input; PyDict_SetItemString(vars, "wsgi.input", object); PyDict_SetItemString(vars, "wsgi.input_terminated", Py_True); /* Setup file wrapper object for efficient file responses. */ PyDict_SetItemString(vars, "wsgi.file_wrapper", (PyObject *)&Stream_Type); /* Add Apache and mod_wsgi version information. */ object = Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, AP_SERVER_MINORVERSION_NUMBER, AP_SERVER_PATCHLEVEL_NUMBER); PyDict_SetItemString(vars, "apache.version", object); Py_DECREF(object); object = Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, MOD_WSGI_MINORVERSION_NUMBER, MOD_WSGI_MICROVERSION_NUMBER); PyDict_SetItemString(vars, "mod_wsgi.version", object); Py_DECREF(object); /* * If Apache extensions are enabled and running in embedded * mode add a CObject reference to the Apache request_rec * structure instance. */ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7) object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use. 
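 */

/*
 * The same reference counting pattern is used for every key set above.
 * A hedged sketch with a hypothetical key: Py_BuildValue() returns a new
 * reference, the dictionary takes its own reference, so the local
 * reference must be released to avoid leaking one object per request.
 */
#if 0
object = Py_BuildValue("(ii)", 2, 4);               /* hypothetical value */
PyDict_SetItemString(vars, "example.version", object);
Py_DECREF(object);
#endif

/*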
*/ #if 0 if (!wsgi_daemon_pool) { object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); } #endif return vars; } static int Adapter_process_file_wrapper(AdapterObject *self) { int done = 0; #ifndef WIN32 PyObject *filelike = NULL; PyObject *method = NULL; PyObject *object = NULL; apr_status_t rv = 0; apr_os_file_t fd = -1; apr_file_t *tmpfile = NULL; apr_finfo_t finfo; apr_off_t fd_offset = 0; apr_off_t fo_offset = 0; apr_off_t length = 0; /* Perform file wrapper optimisations where possible. */ if (!PyObject_IsInstance(self->sequence, (PyObject *)&Stream_Type)) return 0; /* * Only attempt to perform optimisations if the * write() function returned by start_response() * function has not been called with non zero length * data. In other words if no prior response content * generated. Technically it could be done, but want * to have a consistent rule about how specifying a * content length affects how much of a file is * sent. Don't want to have to take into * consideration whether write() function has been * called or not as just complicates things. */ if (self->output_length != 0) return 0; /* * Work out if file wrapper is associated with a * file like object, where that file object is * associated with a regular file. If it does then * we can optimise how the contents of the file are * sent out. If no such associated file descriptor * then it needs to be processed like any other * iterable value. */ filelike = PyObject_GetAttrString((PyObject *)self->sequence, "filelike"); if (!filelike) { PyErr_SetString(PyExc_KeyError, "file wrapper no filelike attribute"); return 0; } fd = PyObject_AsFileDescriptor(filelike); if (fd == -1) { PyErr_Clear(); Py_DECREF(filelike); return 0; } Py_DECREF(filelike); /* * On some platforms, such as Linux, sendfile() system call * will not work on UNIX sockets. Thus when using daemon mode * cannot enable that feature. */ if (self->config->enable_sendfile) apr_os_file_put(&tmpfile, &fd, APR_SENDFILE_ENABLED, self->r->pool); else apr_os_file_put(&tmpfile, &fd, 0, self->r->pool); rv = apr_file_info_get(&finfo, APR_FINFO_SIZE|APR_FINFO_TYPE, tmpfile); if (rv != APR_SUCCESS || finfo.filetype != APR_REG) return 0; /* * Because Python file like objects potentially have * their own buffering layering, or use an operating * system FILE object which also has a buffering * layer on top of a normal file descriptor, need to * determine from the file like object its position * within the file and use that as starting position. * Note that it is assumed that user had flushed any * modifications to the file as necessary. Also, we * need to make sure we remember the original file * descriptor position as will need to restore that * position so it matches the upper buffering layers * when done. This is done to avoid any potential * problems if file like object does anything strange * in its close() method which relies on file position * being what it thought it should be. 
*/ rv = apr_file_seek(tmpfile, APR_CUR, &fd_offset); if (rv != APR_SUCCESS) return 0; method = PyObject_GetAttrString(filelike, "tell"); if (!method) return 0; object = PyObject_CallObject(method, NULL); Py_DECREF(method); if (!object) { PyErr_Clear(); return 0; } if (PyLong_Check(object)) { #if defined(HAVE_LONG_LONG) fo_offset = PyLong_AsLongLong(object); #else fo_offset = PyLong_AsLong(object); #endif } #if PY_MAJOR_VERSION < 3 else if (PyInt_Check(object)) { fo_offset = PyInt_AsLong(object); } #endif else { Py_DECREF(object); return 0; } if (PyErr_Occurred()){ Py_DECREF(object); PyErr_Clear(); return 0; } Py_DECREF(object); /* * For a file wrapper object need to always ensure * that response headers are parsed. This is done so * that if the content length header has been * defined we can get its value and use it to limit * how much of a file is being sent. The WSGI 1.0 * specification says that we are meant to send all * available bytes from the file, however this is * questionable as sending more than content length * would violate HTTP RFC. Note that this doesn't * actually flush the headers out when using Apache * 2.X. This is good, as we want to still be able to * set the content length header if none set and file * is seekable. If processing response headers fails, * then need to return as if done, with error being * logged later. */ if (!Adapter_output(self, "", 0, NULL, 0)) return 1; /* * If content length wasn't defined then determine * the amount of data which is available to send and * set the content length response header. Either * way, if can work out length then send data * otherwise fall through and treat it as normal * iterable. */ if (!self->content_length_set) { length = finfo.size - fo_offset; self->output_length += length; ap_set_content_length(self->r, length); self->content_length_set = 1; self->content_length = length; if (Adapter_output_file(self, tmpfile, fo_offset, length)) self->result = OK; done = 1; } else { length = finfo.size - fo_offset; self->output_length += length; /* Use user specified content length instead. */ length = self->content_length; if (Adapter_output_file(self, tmpfile, fo_offset, length)) self->result = OK; done = 1; } /* * Restore position of underlying file descriptor. * If this fails, then not much we can do about it. 
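 */

/*
 * Worked example (hypothetical numbers) of the length calculation above:
 * if the file is 1000 bytes long and the file like object's tell()
 * already reports 200, only the remaining 800 bytes are sent and, when
 * no Content-Length was supplied, 800 is what gets set as the length.
 */
#if 0
apr_off_t example_size = 1000;      /* finfo.size */
apr_off_t example_offset = 200;     /* position reported by tell() */
apr_off_t example_length = example_size - example_offset;   /* 800 */
#endif

/*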
*/ apr_file_seek(tmpfile, APR_SET, &fd_offset); #endif return done; } static int Adapter_run(AdapterObject *self, PyObject *object) { PyObject *vars = NULL; PyObject *start = NULL; PyObject *args = NULL; PyObject *iterator = NULL; PyObject *close = NULL; PyObject *nrwrapper = NULL; PyObject *evwrapper = NULL; PyObject *value = NULL; PyObject *event = NULL; const char *msg = NULL; apr_off_t length = 0; WSGIThreadInfo *thread_handle = NULL; apr_time_t finish_time; WSGIThreadCPUUsage start_usage; WSGIThreadCPUUsage end_usage; int aborted = 0; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_idle_timeout && !self->config->ignore_activity) { apr_thread_mutex_lock(wsgi_monitor_lock); if (wsgi_idle_timeout) { wsgi_idle_shutdown_time = apr_time_now(); wsgi_idle_shutdown_time += wsgi_idle_timeout; } apr_thread_mutex_unlock(wsgi_monitor_lock); } #endif if (wsgi_newrelic_config_file) { PyObject *module = NULL; module = PyImport_ImportModule("newrelic.agent"); if (module) { PyObject *dict; PyObject *factory; dict = PyModule_GetDict(module); factory = PyDict_GetItemString(dict, "WSGIApplicationWrapper"); if (factory) { Py_INCREF(factory); nrwrapper = PyObject_CallFunctionObjArgs( factory, object, Py_None, NULL); if (!nrwrapper) { wsgi_log_python_error(self->r, self->log, self->r->filename, 0); PyErr_Clear(); } Py_DECREF(factory); } Py_DECREF(module); } } if (nrwrapper) object = nrwrapper; self->start_time = apr_time_now(); apr_table_setn(self->r->subprocess_env, "mod_wsgi.script_start", apr_psprintf(self->r->pool, "%" APR_TIME_T_FMT, self->start_time)); vars = Adapter_environ(self); value = wsgi_PyInt_FromLongLong(wsgi_total_requests); PyDict_SetItemString(vars, "mod_wsgi.total_requests", value); Py_DECREF(value); thread_handle = wsgi_thread_info(1, 1); value = wsgi_PyInt_FromLong(thread_handle->thread_id); PyDict_SetItemString(vars, "mod_wsgi.thread_id", value); Py_DECREF(value); value = wsgi_PyInt_FromLongLong(thread_handle->request_count); PyDict_SetItemString(vars, "mod_wsgi.thread_requests", value); Py_DECREF(value); /* Publish event for the start of the request. 
*/ start_usage.user_time = 0.0; start_usage.system_time = 0.0; if (wsgi_event_subscribers()) { wsgi_thread_cpu_usage(&start_usage); event = PyDict_New(); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) if (self->r->log_id) { #if PY_MAJOR_VERSION >= 3 value = PyUnicode_DecodeLatin1(self->r->log_id, strlen(self->r->log_id), NULL); #else value = PyString_FromString(self->r->log_id); #endif PyDict_SetItemString(event, "request_id", value); Py_DECREF(value); } #endif value = wsgi_PyInt_FromLong(thread_handle->thread_id); PyDict_SetItemString(event, "thread_id", value); Py_DECREF(value); value = wsgi_PyInt_FromLong(self->config->daemon_connects); PyDict_SetItemString(event, "daemon_connects", value); Py_DECREF(value); value = wsgi_PyInt_FromLong(self->config->daemon_restarts); PyDict_SetItemString(event, "daemon_restarts", value); Py_DECREF(value); value = PyFloat_FromDouble(apr_time_sec( (double)self->config->request_start)); PyDict_SetItemString(event, "request_start", value); Py_DECREF(value); value = PyFloat_FromDouble(apr_time_sec( (double)self->config->queue_start)); PyDict_SetItemString(event, "queue_start", value); Py_DECREF(value); value = PyFloat_FromDouble(apr_time_sec( (double)self->config->daemon_start)); PyDict_SetItemString(event, "daemon_start", value); Py_DECREF(value); PyDict_SetItemString(event, "application_object", object); PyDict_SetItemString(event, "request_environ", vars); value = PyFloat_FromDouble(apr_time_sec((double)self->start_time)); PyDict_SetItemString(event, "application_start", value); Py_DECREF(value); PyDict_SetItemString(event, "request_data", thread_handle->request_data); wsgi_publish_event("request_started", event); evwrapper = PyDict_GetItemString(event, "application_object"); if (evwrapper) { if (evwrapper != object) { Py_INCREF(evwrapper); object = evwrapper; } else evwrapper = NULL; } Py_DECREF(event); } /* Pass the request through to the WSGI application. */ thread_handle->request_count++; start = PyObject_GetAttrString((PyObject *)self, "start_response"); args = Py_BuildValue("(OO)", vars, start); self->sequence = PyObject_CallObject(object, args); if (self->sequence != NULL) { if (!Adapter_process_file_wrapper(self)) { iterator = PyObject_GetIter(self->sequence); if (iterator != NULL) { PyObject *item = NULL; while ((item = PyIter_Next(iterator))) { if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "sequence of byte " "string values expected, value of " "type %.200s found", item->ob_type->tp_name); Py_DECREF(item); break; } msg = PyString_AsString(item); length = PyString_Size(item); if (!msg) { Py_DECREF(item); break; } if (length && !Adapter_output(self, msg, length, item, 0)) { if (!PyErr_Occurred()) aborted = 1; Py_DECREF(item); break; } Py_DECREF(item); } } if (!PyErr_Occurred()) { if (!aborted) { /* * In the case where the response was empty we * need to ensure we explicitly flush out the * headers. This is done by calling the output * routine but with an empty string as content. * This could be gated on whether any content * had already been sent, but easier to just call * it all the time. */ if (Adapter_output(self, "", 0, NULL, 0)) self->result = OK; } else { /* * If the client connection was already marked * as aborted, then it indicates the client has * closed the connection. In this case mark the * final result as okay rather than an error so * that the access log still records the original * HTTP response code for the request rather than * overriding it. 
If don't do this then access * log will show 500 when the WSGI application * itself had run fine. */ self->result = OK; } } Py_XDECREF(iterator); } /* * Log warning if more response content generated than was * indicated, or less, if there was no errors generated by * the application and connection wasn't aborted. */ if (self->content_length_set && ((!PyErr_Occurred() && !aborted && self->output_length != self->content_length) || (self->output_length > self->content_length))) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, self->r, "mod_wsgi (pid=%d): Content length mismatch, " "expected %s, response generated %s: %s", getpid(), apr_off_t_toa(self->r->pool, self->content_length), apr_off_t_toa(self->r->pool, self->output_length), self->r->filename); } if (PyErr_Occurred()) { /* * Response content has already been sent, so cannot * return an internal server error as Apache will * append its own error page. Thus need to return OK * and just truncate the response. */ if (self->status_line && !self->headers) self->result = OK; wsgi_log_python_error(self->r, self->log, self->r->filename, 1); /* * If response content is being chunked and an error * occurred, we need to prevent the sending of the EOS * bucket so a client is able to detect that the the * response was incomplete. */ if (self->r->chunked) self->r->eos_sent = 1; } if (PyObject_HasAttrString(self->sequence, "close")) { PyObject *args = NULL; PyObject *data = NULL; close = PyObject_GetAttrString(self->sequence, "close"); args = Py_BuildValue("()"); data = PyObject_CallObject(close, args); Py_DECREF(args); Py_XDECREF(data); Py_DECREF(close); } if (PyErr_Occurred()) wsgi_log_python_error(self->r, self->log, self->r->filename, 1); } else wsgi_log_python_error(self->r, self->log, self->r->filename, 1); /* Publish event for the end of the request. 
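 */

/*
 * Illustrative numbers only: the warning above fires when the declared
 * and generated lengths disagree, for example a response which declared
 * Content-Length: 100 but whose iterable only yielded 90 bytes, or one
 * which yielded more than 100 bytes.
 */
#if 0
apr_off_t declared = 100;           /* value from Content-Length header */
apr_off_t generated = 90;           /* bytes actually produced */
int mismatch = (generated != declared);      /* -> 1, warning is logged */
#endif

/*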
*/ finish_time = apr_time_now(); if (wsgi_event_subscribers()) { double application_time = 0.0; double output_time = 0.0; event = PyDict_New(); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) if (self->r->log_id) { #if PY_MAJOR_VERSION >= 3 value = PyUnicode_DecodeLatin1(self->r->log_id, strlen(self->r->log_id), NULL); #else value = PyString_FromString(self->r->log_id); #endif PyDict_SetItemString(event, "request_id", value); Py_DECREF(value); } #endif value = wsgi_PyInt_FromLongLong(self->input->reads); PyDict_SetItemString(event, "input_reads", value); Py_DECREF(value); value = wsgi_PyInt_FromLongLong(self->input->bytes); PyDict_SetItemString(event, "input_length", value); Py_DECREF(value); value = PyFloat_FromDouble(apr_time_sec((double)self->input->time)); PyDict_SetItemString(event, "input_time", value); Py_DECREF(value); value = wsgi_PyInt_FromLongLong(self->output_length); PyDict_SetItemString(event, "output_length", value); Py_DECREF(value); value = wsgi_PyInt_FromLongLong(self->output_writes); PyDict_SetItemString(event, "output_writes", value); Py_DECREF(value); output_time = apr_time_sec((double)self->output_time); if (output_time < 0.0) output_time = 0.0; application_time = apr_time_sec((double)finish_time-self->start_time); if (application_time < 0.0) application_time = 0.0; if (start_usage.user_time != 0.0) { if (wsgi_thread_cpu_usage(&end_usage)) { double user_seconds; double system_seconds; double total_seconds; user_seconds = end_usage.user_time; user_seconds -= start_usage.user_time; if (user_seconds < 0.0) user_seconds = 0.0; system_seconds = end_usage.system_time; system_seconds -= start_usage.system_time; if (system_seconds < 0.0) system_seconds = 0.0; total_seconds = user_seconds + system_seconds; if (total_seconds && total_seconds > application_time) { user_seconds = (user_seconds/total_seconds)*application_time; system_seconds = application_time - user_seconds; } value = PyFloat_FromDouble(user_seconds); PyDict_SetItemString(event, "cpu_user_time", value); Py_DECREF(value); value = PyFloat_FromDouble(system_seconds); PyDict_SetItemString(event, "cpu_system_time", value); Py_DECREF(value); } } value = PyFloat_FromDouble(output_time); PyDict_SetItemString(event, "output_time", value); Py_DECREF(value); value = PyFloat_FromDouble(apr_time_sec((double)finish_time)); PyDict_SetItemString(event, "application_finish", value); Py_DECREF(value); value = PyFloat_FromDouble(application_time); PyDict_SetItemString(event, "application_time", value); Py_DECREF(value); PyDict_SetItemString(event, "request_data", thread_handle->request_data); wsgi_publish_event("request_finished", event); Py_DECREF(event); } /* * Record server and application time for metrics. Values * are the time request first accepted by child workers, * the time that the WSGI application started processing * the request, and when the WSGI application finished the * request. */ wsgi_record_request_times(self->config->request_start, self->config->queue_start, self->config->daemon_start, self->start_time, finish_time); /* * If result indicates an internal server error, then * replace the status line in the request object else * that provided by the application will be what is used * in any error page automatically generated by Apache. 
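 */

/*
 * Worked example (hypothetical timings) of the CPU usage normalisation
 * performed above: when measured user plus system CPU time exceeds the
 * wall clock time for the request, the two values are scaled so they sum
 * to the wall clock time while keeping their ratio.
 */
#if 0
double wall_time = 0.2;             /* application_time in seconds */
double user_time = 0.3;
double system_time = 0.1;
double total_time = user_time + system_time;         /* 0.4 > 0.2 */

if (total_time && total_time > wall_time) {
    user_time = (user_time/total_time)*wall_time;     /* 0.15 */
    system_time = wall_time - user_time;              /* 0.05 */
}
#endif

/*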
*/ if (self->result == HTTP_INTERNAL_SERVER_ERROR) self->r->status_line = "500 Internal Server Error"; Py_DECREF(args); Py_DECREF(start); Py_DECREF(vars); Py_XDECREF(nrwrapper); Py_XDECREF(evwrapper); Py_XDECREF(self->sequence); self->sequence = NULL; return self->result; } static PyObject *Adapter_write(AdapterObject *self, PyObject *args) { PyObject *item = NULL; const char *data = NULL; long length = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:write", &item)) return NULL; if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); return NULL; } data = PyString_AsString(item); length = PyString_Size(item); if (!Adapter_output(self, data, length, item, 1)) { return NULL; } Py_INCREF(Py_None); return Py_None; } static PyObject *Adapter_ssl_is_https(AdapterObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Adapter_ssl_var_lookup(AdapterObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; PyObject *latin_item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); return NULL; } item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_XDECREF(latin_item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_XDECREF(latin_item); Py_INCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); Py_XDECREF(latin_item); if (!value) { Py_INCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } static PyMethodDef Adapter_methods[] = { { "start_response", (PyCFunction)Adapter_start_response, METH_VARARGS, 0 }, { "write", (PyCFunction)Adapter_write, METH_VARARGS, 0 }, { "ssl_is_https", (PyCFunction)Adapter_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Adapter_ssl_var_lookup, METH_VARARGS, 0 }, { NULL, NULL} }; static PyTypeObject Adapter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Adapter", /*tp_name*/ sizeof(AdapterObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Adapter_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, 
/*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Adapter_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* * Code for importing a module from source by absolute path. */ static PyObject *wsgi_load_source(apr_pool_t *pool, request_rec *r, const char *name, int exists, const char* filename, const char *process_group, const char *application_group, int ignore_system_exit) { PyObject *m = NULL; PyObject *co = NULL; PyObject *io_module = NULL; PyObject *fileobject = NULL; PyObject *source_bytes_object = NULL; PyObject *result = NULL; char *source_buf = NULL; if (exists) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Reloading WSGI script '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Reloading WSGI script '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Loading Python script file '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Loading Python script file '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS } io_module = PyImport_ImportModule("io"); if (!io_module) goto load_source_finally; fileobject = PyObject_CallMethod(io_module, "open", "ss", filename, "rb"); if (!fileobject) goto load_source_finally; source_bytes_object = PyObject_CallMethod(fileobject, "read", ""); if (!source_bytes_object) goto load_source_finally; result = PyObject_CallMethod(fileobject, "close", ""); if (!result) goto load_source_finally; source_buf = PyBytes_AsString(source_bytes_object); if (!source_buf) goto load_source_finally; co = Py_CompileString(source_buf, filename, Py_file_input); load_source_finally: if (!co) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_ERR, errno, r, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Could not read/compile source file '%s'.", getpid(), process_group, application_group, filename); } else { ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Could not read/compile source file '%s'.", getpid(), process_group, application_group, filename); } Py_END_ALLOW_THREADS wsgi_log_python_error(r, NULL, filename, 0); Py_XDECREF(io_module); Py_XDECREF(fileobject); Py_XDECREF(source_bytes_object); Py_XDECREF(result); return NULL; } Py_XDECREF(io_module); Py_XDECREF(fileobject); Py_XDECREF(source_bytes_object); Py_XDECREF(result); m = PyImport_ExecCodeModuleEx((char *)name, co, (char *)filename); if (m) { PyObject *object = NULL; if (!r || strcmp(r->filename, filename)) { apr_finfo_t finfo; apr_status_t status; Py_BEGIN_ALLOW_THREADS status = apr_stat(&finfo, filename, APR_FINFO_NORM, pool); Py_END_ALLOW_THREADS if (status != APR_SUCCESS) object = PyLong_FromLongLong(0); else object = PyLong_FromLongLong(finfo.mtime); } else { object = PyLong_FromLongLong(r->finfo.mtime); } PyModule_AddObject(m, "__mtime__", object); } 
else { if (PyErr_ExceptionMatches(PyExc_SystemExit)) { if (!ignore_system_exit) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): SystemExit exception " "raised when doing exec of Python script " "file '%s'.", getpid(), filename); } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): SystemExit exception " "raised when doing exec of Python script " "file '%s'.", getpid(), filename); } Py_END_ALLOW_THREADS } } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Failed to exec Python script " "file '%s'.", getpid(), filename); } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Failed to exec Python script " "file '%s'.", getpid(), filename); } Py_END_ALLOW_THREADS wsgi_log_python_error(r, NULL, filename, 0); } } Py_XDECREF(co); return m; } static int wsgi_reload_required(apr_pool_t *pool, request_rec *r, const char *filename, PyObject *module, const char *resource) { PyObject *dict = NULL; PyObject *object = NULL; apr_time_t mtime = 0; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "__mtime__"); if (object) { mtime = PyLong_AsLongLong(object); if (!r || strcmp(r->filename, filename)) { apr_finfo_t finfo; apr_status_t status; Py_BEGIN_ALLOW_THREADS status = apr_stat(&finfo, filename, APR_FINFO_NORM, pool); Py_END_ALLOW_THREADS if (status != APR_SUCCESS) return 1; else if (mtime != finfo.mtime) return 1; } else { if (mtime != r->finfo.mtime) return 1; } } else return 1; if (resource) { PyObject *dict = NULL; PyObject *object = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "reload_required"); if (object) { PyObject *args = NULL; PyObject *result = NULL; #if PY_MAJOR_VERSION >= 3 PyObject *path = NULL; #endif Py_INCREF(object); #if PY_MAJOR_VERSION >= 3 path = PyUnicode_Decode(resource, strlen(resource), Py_FileSystemDefaultEncoding, "surrogateescape"); args = Py_BuildValue("(O)", path); Py_DECREF(path); #else args = Py_BuildValue("(s)", resource); #endif result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result && PyObject_IsTrue(result)) { Py_DECREF(result); return 1; } if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, filename, 0); Py_XDECREF(result); } } return 0; } static char *wsgi_module_name(apr_pool_t *pool, const char *filename) { char *hash = NULL; char *file = NULL; /* * Calculate a name for the module using the MD5 of its full * pathname. This is so that different code files with the * same basename are still considered unique. Note that where * we believe a case insensitive file system is being used, * we always change the file name to lower case so that use * of different case in name doesn't result in duplicate * modules being loaded for the same file. */ file = (char *)filename; if (wsgi_server_config->case_sensitivity) { file = apr_pstrdup(pool, file); ap_str_tolower(file); } hash = ap_md5(pool, (const unsigned char *)file); return apr_pstrcat(pool, "_mod_wsgi_", hash, NULL); } #if APR_HAS_THREADS static apr_thread_mutex_t* wsgi_module_lock = NULL; #endif static int wsgi_execute_script(request_rec *r) { WSGIRequestConfig *config = NULL; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; const char *script = NULL; const char *name = NULL; int exists = 0; int status; WSGIThreadInfo *thread_info = NULL; /* Grab request configuration. 
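 */

/*
 * Sketch of what wsgi_module_name() above produces (the path here is
 * hypothetical): the module name is "_mod_wsgi_" followed by the MD5 hex
 * digest of the script path, so scripts with the same basename in
 * different directories still map to distinct module names.
 */
#if 0
const char *example_path = "/srv/www/app.wsgi";
char *example_digest = ap_md5(r->pool, (const unsigned char *)example_path);
char *example_name = apr_pstrcat(r->pool, "_mod_wsgi_", example_digest, NULL);
#endif

/*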
*/ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ interp = wsgi_acquire_interpreter(config->application_group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), config->application_group); return HTTP_INTERNAL_SERVER_ERROR; } /* Setup startup timeout if first request and specified. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { if (wsgi_startup_shutdown_time == 0) { if (wsgi_startup_timeout > 0) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Application startup " "timer triggered '%s'.", getpid(), config->process_group); apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_startup_shutdown_time = apr_time_now(); wsgi_startup_shutdown_time += wsgi_startup_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); } } } #endif /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif /* Calculate the Python module name to be used for script. */ if (config->handler_script && *config->handler_script) { script = config->handler_script; #if 0 /* * Check for whether a module reference is provided * as opposed to a filesystem path. */ if (strlen(script) > 2 && script[0] == '(' && script[strlen(script)-1] == ')') { name = apr_pstrndup(r->pool, script+1, strlen(script)-2); module = PyImport_ImportModule(name); if (!module) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Failed to import handler " "via Python module reference %s.", getpid(), script); Py_END_ALLOW_THREADS wsgi_log_python_error(r, NULL, r->filename, 0); } } #endif } else script = r->filename; if (!module) { name = wsgi_module_name(r->pool, script); modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. For a handler script will * also see if it contains a custom function for determining * if a reload should be performed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, r->filename)) { /* * Script file has changed. Discard reference to * loaded module and work out what action we are * supposed to take. Choices are process reloading * and module reloading. Process reloading cannot be * performed unless a daemon process is being used. */ Py_DECREF(module); module = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) if (*config->process_group) { /* * Need to restart the daemon process. We bail * out on the request process here, sending back * a special response header indicating that * process is being restarted and that remote * end should abandon connection and attempt to * reconnect again. We also need to signal this * process so it will actually shutdown. The * process supervisor code will ensure that it * is restarted. 
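 */

/*
 * The interpreter handling earlier in this function follows the usual
 * acquire/work/release pairing. A minimal sketch, with a hypothetical
 * application group name, of how that pairing is meant to be used:
 */
#if 0
InterpreterObject *example_interp;

example_interp = wsgi_acquire_interpreter("example.local|/app");
if (example_interp) {
    /* ... manipulate Python objects here ... */
    wsgi_release_interpreter(example_interp);
}
#endif

/*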
*/ Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, "mod_wsgi (pid=%d): Force restart of " "process '%s'.", getpid(), config->process_group); Py_END_ALLOW_THREADS #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif wsgi_release_interpreter(interp); r->status = HTTP_INTERNAL_SERVER_ERROR; r->status_line = "200 Rejected"; wsgi_daemon_shutdown++; kill(getpid(), SIGINT); return OK; } else { /* * Need to reload just the script module. Remove * the module from the modules dictionary before * reloading it again. If code is executing * within the module at the time, the callers * reference count on the module should ensure * it isn't actually destroyed until it is * finished. */ PyDict_DelItemString(modules, name); } #else /* * Need to reload just the script module. Remove * the module from the modules dictionary before * reloading it again. If code is executing * within the module at the time, the callers * reference count on the module should ensure * it isn't actually destroyed until it is * finished. */ PyDict_DelItemString(modules, name); #endif } } } /* * When process reloading is in use, or a queue timeout is * set, need to indicate that request content should now be * sent through. This is done by writing a special response * header directly out onto the appropriate network output * filter. The special response is picked up by remote end * and data will then be sent. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (*config->process_group && (config->script_reloading || wsgi_daemon_process->group->queue_timeout != 0)) { ap_filter_t *filters; apr_bucket_brigade *bb; apr_bucket *b; const char *data = "Status: 200 Continue\r\n\r\n"; long length = strlen(data); Py_BEGIN_ALLOW_THREADS filters = r->output_filters; while (filters && filters->frec->ftype != AP_FTYPE_NETWORK) { filters = filters->next; } bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_transient_create(data, length, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); b = apr_bucket_flush_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bb, b); /* * This should always work, so ignore any errors * from passing the brigade to the network * output filter. If there are are problems they * will be picked up further down in processing * anyway. */ ap_pass_brigade(filters, bb); Py_END_ALLOW_THREADS } #endif /* Setup metrics for start of request. */ thread_info = wsgi_start_request(r); /* Load module if not already loaded. */ if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, config->process_group, config->application_group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* * Clear startup timeout and prevent from running again if the * module was successfully loaded. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (module && wsgi_startup_shutdown_time > 0) { wsgi_startup_shutdown_time = -1; ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Application startup " "timer cancelled '%s'.", getpid(), config->process_group); } #endif /* Assume an internal server error unless everything okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. 
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, config->callable_object); if (object) { AdapterObject *adapter = NULL; adapter = newAdapterObject(r); if (adapter) { PyObject *method = NULL; PyObject *args = NULL; Py_INCREF(adapter->log_buffer); thread_info->log_buffer = adapter->log_buffer; Py_INCREF(object); status = Adapter_run(adapter, object); Py_DECREF(object); /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; Input_finish(adapter->input); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); object = PyObject_CallObject(method, args); Py_DECREF(args); } Py_XDECREF(object); Py_XDECREF(method); Py_CLEAR(thread_info->log_buffer); adapter->bb = NULL; } Py_XDECREF((PyObject *)adapter); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI script '%s' does " "not contain WSGI application '%s'.", getpid(), script, config->callable_object); Py_END_ALLOW_THREADS status = HTTP_NOT_FOUND; } } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, r->filename, 0); Py_XDECREF(module); /* Finalise any metrics at end of the request. */ wsgi_end_request(); /* Cleanup and release interpreter, */ wsgi_release_interpreter(interp); return status; } /* * Apache child process initialisation and cleanup. Initialise * global table containing Python interpreter instances and * cache reference to main interpreter. Also register cleanup * function to delete interpreter on process shutdown. */ static apr_status_t wsgi_python_child_cleanup(void *data) { PyObject *interp = NULL; /* * If not a daemon process need to publish that process * is shutting down here. For daemon we did it earlier * before trying to wait on request threads. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (!wsgi_daemon_process) wsgi_publish_process_stopping(wsgi_shutdown_reason); #else wsgi_publish_process_stopping(wsgi_shutdown_reason); #endif /* Skip destruction of Python interpreter. */ if (wsgi_server_config->destroy_interpreter == 0) return APR_SUCCESS; /* In a multithreaded MPM must protect table. */ #if APR_HAS_THREADS apr_thread_mutex_lock(wsgi_interp_lock); #endif /* * We should be executing in the main thread again at this * point but without the GIL, so simply restore the original * thread state for that thread that we remembered when we * initialised the interpreter. */ PyEval_AcquireThread(wsgi_main_tstate); /* * Extract a handle to the main Python interpreter from * interpreters dictionary as want to process that one last. */ interp = PyDict_GetItemString(wsgi_interpreters, ""); Py_INCREF(interp); /* * Remove all items from interpreters dictionary. This will * have side affect of calling any exit functions and * destroying interpreters we own. 
*/ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Destroying interpreters.", getpid()); PyDict_Clear(wsgi_interpreters); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif /* * Now we decrement reference on handle for main Python * interpreter. This only causes exit functions to be called * and doesn't result in interpreter being destroyed as we * we didn't previously mark ourselves as the owner of the * interpreter. Note that when Python as a whole is later * being destroyed it will also call exit functions, but by * then the exit function registrations have been removed * and so they will not actually be run a second time. */ Py_DECREF(interp); /* * The code which performs actual shutdown of the main * interpreter expects to be called without the GIL, so * we release it here again. */ PyEval_ReleaseThread(wsgi_main_tstate); /* * Destroy Python itself including the main interpreter. * If mod_python is being loaded it is left to mod_python to * destroy Python, although it currently doesn't do so. */ if (wsgi_python_initialized) wsgi_python_term(); return APR_SUCCESS; } static void wsgi_python_child_init(apr_pool_t *p) { PyGILState_STATE state; PyObject *object = NULL; int ignore_system_exit = 0; /* Working with Python, so must acquire GIL. */ state = PyGILState_Ensure(); /* * Trigger any special Python stuff required after a fork. * Only do this though if we were responsible for the * initialisation of the Python interpreter in the first * place to avoid it being done multiple times. Also only * do it if Python was initialised in parent process. */ #ifdef HAVE_FORK if (wsgi_python_initialized && !wsgi_python_after_fork) { #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 7) PyOS_AfterFork_Child(); #else PyOS_AfterFork(); #endif } #endif /* Finalise any Python objects required by child process. */ PyType_Ready(&Log_Type); PyType_Ready(&Stream_Type); PyType_Ready(&Input_Type); PyType_Ready(&Adapter_Type); PyType_Ready(&Restricted_Type); PyType_Ready(&Interpreter_Type); PyType_Ready(&Dispatch_Type); PyType_Ready(&Auth_Type); PyType_Ready(&SignalIntercept_Type); #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 4) PyType_Ready(&ShutdownInterpreter_Type); #endif /* Initialise Python interpreter instance table and lock. */ wsgi_interpreters = PyDict_New(); #if APR_HAS_THREADS apr_thread_mutex_create(&wsgi_interp_lock, APR_THREAD_MUTEX_UNNESTED, p); apr_thread_mutex_create(&wsgi_module_lock, APR_THREAD_MUTEX_UNNESTED, p); apr_thread_mutex_create(&wsgi_shutdown_lock, APR_THREAD_MUTEX_UNNESTED, p); #endif /* * Create an interpreters index using Apache data structure so * can iterate over interpreter names without needing Python GIL. */ wsgi_interpreters_index = apr_hash_make(p); /* * Initialise the key for data related to a thread and force * creation of thread info. */ apr_threadkey_private_create(&wsgi_thread_key, NULL, p); wsgi_thread_info(1, 0); /* * Cache a reference to the first Python interpreter * instance. This interpreter is special as some third party * Python modules will only work when used from within this * interpreter. This is generally when they use the Python * simplified GIL API or otherwise don't use threading API * properly. An empty string for name is used to identify * the first Python interpreter instance. 
*/ object = (PyObject *)newInterpreterObject(NULL); PyDict_SetItemString(wsgi_interpreters, "", object); Py_DECREF(object); apr_hash_set(wsgi_interpreters_index, "", APR_HASH_KEY_STRING, ""); /* Restore the prior thread state and release the GIL. */ PyGILState_Release(state); /* Register cleanups to performed on process shutdown. */ apr_pool_cleanup_register(p, NULL, wsgi_python_child_cleanup, apr_pool_cleanup_null); /* Loop through import scripts for this process and load them. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process && wsgi_daemon_process->group->threads == 0) ignore_system_exit = 1; #endif if (wsgi_import_list) { apr_array_header_t *scripts = NULL; WSGIScriptFile *entries; WSGIScriptFile *entry; int i; scripts = wsgi_import_list; entries = (WSGIScriptFile *)scripts->elts; for (i = 0; i < scripts->nelts; ++i) { entry = &entries[i]; /* * Stop loading scripts if this is a daemon process and * we have already been flagged to be shutdown. */ if (wsgi_daemon_shutdown) break; if (!strcmp(wsgi_daemon_group, entry->process_group)) { InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; interp = wsgi_acquire_interpreter(entry->application_group); if (!interp) { ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, "mod_wsgi (pid=%d): Cannot acquire " "interpreter '%s'.", getpid(), entry->application_group); } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(p, entry->handler_script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module. Strictly * speaking shouldn't be required at this point. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && wsgi_server_config->script_reloading) { if (wsgi_reload_required(p, NULL, entry->handler_script, module, NULL)) { /* * Script file has changed. Only support module * reloading for dispatch scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(p, NULL, name, exists, entry->handler_script, entry->process_group, entry->application_group, ignore_system_exit); if (PyErr_Occurred()) PyErr_Clear(); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); } } } } /* The processors for directives. */ static int wsgi_parse_option(apr_pool_t *p, const char **line, const char **name, const char **value) { const char *str = *line, *strend; while (*str && apr_isspace(*str)) ++str; if (!*str || *str == '=') { *line = str; return !APR_SUCCESS; } /* Option must be of form name=value. Extract the name. 
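 */

/*
 * Example (hypothetical directive arguments) of the name=value options
 * this parser handles, as used by WSGIScriptAlias further below:
 *
 *   WSGIScriptAlias /app /srv/www/app.wsgi process-group=demo application-group=%{GLOBAL}
 *
 * For the option "application-group=%{GLOBAL}" the code that follows
 * yields the name and value shown in the sketch.
 */
#if 0
const char *example_option = "application-group=%{GLOBAL}";   /* input */
const char *example_name   = "application-group";             /* result */
const char *example_value  = "%{GLOBAL}";                      /* result */
#endif

/*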
*/ strend = str; while (*strend && *strend != '=' && !apr_isspace(*strend)) ++strend; if (*strend != '=') { *line = str; return !APR_SUCCESS; } *name = apr_pstrndup(p, str, strend-str); *line = strend+1; /* Now extract the value. Note that value can be quoted. */ *value = ap_getword_conf(p, line); return APR_SUCCESS; } static const char *wsgi_add_script_alias(cmd_parms *cmd, void *mconfig, const char *args) { const char *l = NULL; const char *a = NULL; WSGIServerConfig *sconfig = NULL; WSGIAliasEntry *entry = NULL; const char *option = NULL; const char *value = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) const char *process_group = NULL; #else const char *process_group = ""; #endif const char *application_group = NULL; const char *callable_object = NULL; int pass_authorization = -1; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->alias_list) { sconfig->alias_list = apr_array_make(sconfig->pool, 20, sizeof(WSGIAliasEntry)); } l = ap_getword_conf(cmd->pool, &args); if (*l == '\0' || *args == 0) { return apr_pstrcat(cmd->pool, cmd->cmd->name, " requires at least two arguments", cmd->cmd->errmsg ? ", " : NULL, cmd->cmd->errmsg, NULL); } a = ap_getword_conf(cmd->pool, &args); if (*a == '\0') { return apr_pstrcat(cmd->pool, cmd->cmd->name, " requires at least two arguments", cmd->cmd->errmsg ? ", " : NULL, cmd->cmd->errmsg, NULL); } while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI script alias definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; if (!strcmp(value, "%{GLOBAL}")) value = ""; application_group = value; } #if defined(MOD_WSGI_WITH_DAEMONS) else if (!strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; if (!strcmp(value, "%{GLOBAL}")) value = ""; process_group = value; } #endif else if (!strcmp(option, "callable-object")) { if (!*value) return "Invalid name for WSGI callable object."; callable_object = value; } else if (!strcmp(option, "pass-authorization")) { if (!*value) return "Invalid value for authorization flag."; if (strcasecmp(value, "Off") == 0) pass_authorization = 0; else if (strcasecmp(value, "On") == 0) pass_authorization = 1; else return "Invalid value for authorization flag."; } else return "Invalid option to WSGI script alias definition."; } entry = (WSGIAliasEntry *)apr_array_push(sconfig->alias_list); if (cmd->info) { entry->regexp = ap_pregcomp(cmd->pool, l, AP_REG_EXTENDED); if (!entry->regexp) return "Regular expression could not be compiled."; } entry->location = l; entry->application = a; entry->process_group = process_group; entry->application_group = application_group; entry->callable_object = callable_object; entry->pass_authorization = pass_authorization; /* * Only add to import list if both process group and application * group are specified, that they don't include substitution values, * and in the case of WSGIScriptAliasMatch, that the WSGI script * target path doesn't include substitutions from URL pattern. 
*/ if (process_group && application_group && !strstr(process_group, "%{") && !strstr(application_group, "%{") && (!cmd->info || !strstr(a, "$"))) { WSGIScriptFile *object = NULL; if (!wsgi_import_list) { wsgi_import_list = apr_array_make(cmd->pool, 20, sizeof(WSGIScriptFile)); apr_pool_cleanup_register(cmd->pool, &wsgi_import_list, ap_pool_cleanup_set_null, apr_pool_cleanup_null); } object = (WSGIScriptFile *)apr_array_push(wsgi_import_list); object->handler_script = a; object->process_group = process_group; object->application_group = application_group; #if defined(MOD_WSGI_WITH_DAEMONS) if (*object->process_group && strcmp(object->process_group, "%{RESOURCE}") != 0 && strcmp(object->process_group, "%{SERVER}") != 0 && strcmp(object->process_group, "%{HOST}") != 0) { WSGIProcessGroup *group = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; if (!wsgi_daemon_list) return "WSGI process group not yet configured."; entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (!strcmp(entry->name, object->process_group)) { group = entry; break; } } if (!group) return "WSGI process group not yet configured."; if (cmd->server->server_hostname && group->server->server_hostname && strcmp(cmd->server->server_hostname, group->server->server_hostname) && group->server->is_virtual) { return "WSGI process group not accessible."; } if (!cmd->server->server_hostname && group->server->server_hostname && group->server->is_virtual) { return "WSGI process group not matchable."; } if (cmd->server->server_hostname && !group->server->server_hostname && group->server->is_virtual) { return "WSGI process group not matchable."; } } #endif } return NULL; } static const char *wsgi_set_verbose_debugging(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->verbose_debugging = 0; else if (strcasecmp(f, "On") == 0) sconfig->verbose_debugging = 1; else return "WSGIVerboseDebugging must be one of: Off | On"; return NULL; } static const char *wsgi_set_lazy_initialization(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; if (strcasecmp(f, "Off") == 0) wsgi_python_after_fork = 0; else if (strcasecmp(f, "On") == 0) wsgi_python_after_fork = 1; else return "WSGILazyInitialization must be one of: Off | On"; return NULL; } static const char *wsgi_add_python_warnings(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; char **entry = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->python_warnings) { sconfig->python_warnings = apr_array_make(sconfig->pool, 5, sizeof(char*)); } entry = (char **)apr_array_push(sconfig->python_warnings); *entry = apr_pstrdup(sconfig->pool, f); return NULL; } #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 static const char *wsgi_set_py3k_warning_flag(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = 
ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->py3k_warning_flag = 0; else if (strcasecmp(f, "On") == 0) sconfig->py3k_warning_flag = 1; else return "WSGIPy3kWarningFlag must be one of: Off | On"; return NULL; } #endif #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6) static const char *wsgi_set_dont_write_bytecode(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->dont_write_bytecode = 0; else if (strcasecmp(f, "On") == 0) sconfig->dont_write_bytecode = 1; else return "WSGIDontWriteBytecode must be one of: Off | On"; return NULL; } #endif static const char *wsgi_set_python_optimize(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_optimize = atoi(f); return NULL; } static const char *wsgi_set_python_home(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_home = f; return NULL; } static const char *wsgi_set_python_path(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_path = f; return NULL; } static const char *wsgi_set_python_eggs(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_eggs = f; return NULL; } static const char *wsgi_set_python_hash_seed(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; /* * Must check this here because if we don't and is wrong, then * Python interpreter will check later and may kill the process. 
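 *
 * For illustration (the values are arbitrary), the directive accepts
 * either the literal word "random" or an unsigned 32 bit integer:
 *
 *   WSGIPythonHashSeed random
 *   WSGIPythonHashSeed 12345
 *
 * Anything else is rejected here rather than being left for the
 * Python interpreter to complain about when it is initialised.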
*/ if (f && *f != '\0' && strcmp(f, "random") != 0) { const char *endptr = f; unsigned long seed; seed = PyOS_strtoul((char *)f, (char **)&endptr, 10); if (*endptr != '\0' || seed > 4294967295UL || (errno == ERANGE && seed == ULONG_MAX)) { return "WSGIPythonHashSeed must be \"random\" or an integer " "in range [0; 4294967295]"; } } sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->python_hash_seed = f; return NULL; } static const char *wsgi_set_destroy_interpreter(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->destroy_interpreter = 0; else if (strcasecmp(f, "On") == 0) sconfig->destroy_interpreter = 1; else return "WSGIDestroyInterpreter must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_embedded(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_embedded = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_embedded = 1; else return "WSGIRestrictEmbedded must be one of: Off | On"; if (sconfig->restrict_embedded) { if (wsgi_python_required == -1) wsgi_python_required = 0; } return NULL; } static const char *wsgi_set_restrict_stdin(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_stdin = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_stdin = 1; else return "WSGIRestrictStdin must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_stdout(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_stdout = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_stdout = 1; else return "WSGIRestrictStdout must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_signal(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->restrict_signal = 0; else if (strcasecmp(f, "On") == 0) sconfig->restrict_signal = 1; else return "WSGIRestrictSignal must be one of: Off | On"; return NULL; } static const char *wsgi_set_case_sensitivity(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->case_sensitivity = 0; else if (strcasecmp(f, "On") == 0) sconfig->case_sensitivity = 1; else return 
"WSGICaseSensitivity must be one of: Off | On"; return NULL; } static const char *wsgi_set_restrict_process(cmd_parms *cmd, void *mconfig, const char *args) { apr_table_t *index = apr_table_make(cmd->pool, 5); if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->restrict_process = index; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->restrict_process = index; } while (*args) { const char *option; option = ap_getword_conf(cmd->pool, &args); if (!strcmp(option, "%{GLOBAL}")) option = ""; apr_table_setn(index, option, option); } return NULL; } static const char *wsgi_set_process_group(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->process_group = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->process_group = n; } return NULL; } static const char *wsgi_set_application_group(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->application_group = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->application_group = n; } return NULL; } static const char *wsgi_set_callable_object(cmd_parms *cmd, void *mconfig, const char *n) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->callable_object = n; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->callable_object = n; } return NULL; } static const char *wsgi_add_import_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; if (!wsgi_import_list) { wsgi_import_list = apr_array_make(cmd->pool, 20, sizeof(WSGIScriptFile)); apr_pool_cleanup_register(cmd->pool, &wsgi_import_list, ap_pool_cleanup_set_null, apr_pool_cleanup_null); } object = (WSGIScriptFile *)apr_array_push(wsgi_import_list); object->handler_script = ap_getword_conf(cmd->pool, &args); object->process_group = NULL; object->application_group = NULL; if (!object->handler_script || !*object->handler_script) return "Location of import script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI import script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } #if defined(MOD_WSGI_WITH_DAEMONS) else if (!strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; object->process_group = value; } #endif else return "Invalid option to WSGI import script definition."; } if (!object->application_group) return "Name of WSGI application group required."; if (!strcmp(object->application_group, "%{GLOBAL}")) object->application_group = ""; #if defined(MOD_WSGI_WITH_DAEMONS) if (!object->process_group) return "Name of WSGI process group required."; if (!strcmp(object->process_group, "%{GLOBAL}")) object->process_group = ""; if (*object->process_group) { WSGIProcessGroup *group = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; if 
(!wsgi_daemon_list) return "WSGI process group not yet configured."; entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (!strcmp(entry->name, object->process_group)) { group = entry; break; } } if (!group) return "WSGI process group not yet configured."; if (cmd->server->server_hostname && group->server->server_hostname && strcmp(cmd->server->server_hostname, group->server->server_hostname) && group->server->is_virtual) { return "WSGI process group not accessible."; } if (!cmd->server->server_hostname && group->server->server_hostname && group->server->is_virtual) { return "WSGI process group not matchable."; } if (cmd->server->server_hostname && !group->server->server_hostname && group->server->is_virtual) { return "WSGI process group not matchable."; } } #else object->process_group = ""; #endif if (!*object->process_group) wsgi_python_required = 1; return NULL; } static const char *wsgi_set_dispatch_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of dispatch script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI dispatch script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI dispatch script definition."; } if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->dispatch_script = object; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->dispatch_script = object; } wsgi_python_required = 1; return NULL; } static const char *wsgi_set_pass_apache_request(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->pass_apache_request = 0; else if (strcasecmp(f, "On") == 0) dconfig->pass_apache_request = 1; else return "WSGIPassApacheRequest must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->pass_apache_request = 0; else if (strcasecmp(f, "On") == 0) sconfig->pass_apache_request = 1; else return "WSGIPassApacheRequest must be one of: Off | On"; } return NULL; } static const char *wsgi_set_pass_authorization(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->pass_authorization = 0; else if (strcasecmp(f, "On") == 0) dconfig->pass_authorization = 1; else return "WSGIPassAuthorization must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->pass_authorization = 0; else if (strcasecmp(f, "On") == 0) sconfig->pass_authorization = 1; else return "WSGIPassAuthorization must be one of: Off | On"; } return NULL; } static const char *wsgi_set_script_reloading(cmd_parms *cmd, void *mconfig, const char *f) 
{ if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->script_reloading = 0; else if (strcasecmp(f, "On") == 0) dconfig->script_reloading = 1; else return "WSGIScriptReloading must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->script_reloading = 0; else if (strcasecmp(f, "On") == 0) sconfig->script_reloading = 1; else return "WSGIScriptReloading must be one of: Off | On"; } return NULL; } static const char *wsgi_set_error_override(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) dconfig->error_override = 1; else return "WSGIErrorOverride must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->error_override = 0; else if (strcasecmp(f, "On") == 0) sconfig->error_override = 1; else return "WSGIErrorOverride must be one of: Off | On"; } return NULL; } static const char *wsgi_set_chunked_request(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->chunked_request = 0; else if (strcasecmp(f, "On") == 0) dconfig->chunked_request = 1; else return "WSGIChunkedRequest must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->chunked_request = 0; else if (strcasecmp(f, "On") == 0) sconfig->chunked_request = 1; else return "WSGIChunkedRequest must be one of: Off | On"; } return NULL; } static const char *wsgi_set_map_head_to_get(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->map_head_to_get = 0; else if (strcasecmp(f, "On") == 0) dconfig->map_head_to_get = 1; else if (strcasecmp(f, "Auto") == 0) dconfig->map_head_to_get = 2; else return "WSGIMapHEADToGET must be one of: Off | On | Auto"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->map_head_to_get = 0; else if (strcasecmp(f, "On") == 0) sconfig->map_head_to_get = 1; else if (strcasecmp(f, "Auto") == 0) sconfig->map_head_to_get = 2; else return "WSGIMapHEADToGET must be one of: Off | On | Auto"; } return NULL; } static const char *wsgi_set_ignore_activity(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->ignore_activity = 0; else if (strcasecmp(f, "On") == 0) dconfig->ignore_activity = 1; else return "WSGIIgnoreActivity must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->ignore_activity = 0; else if (strcasecmp(f, "On") == 0) sconfig->ignore_activity = 1; else return "WSGIIgnoreActivity must be one of: Off | On"; } return NULL; } static char *wsgi_http2env(apr_pool_t *a, 
const char *w); static const char *wsgi_set_trusted_proxy_headers(cmd_parms *cmd, void *mconfig, const char *args) { apr_array_header_t *headers = NULL; if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (!dconfig->trusted_proxy_headers) { headers = apr_array_make(cmd->pool, 3, sizeof(char*)); dconfig->trusted_proxy_headers = headers; } else headers = dconfig->trusted_proxy_headers; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->trusted_proxy_headers) { headers = apr_array_make(cmd->pool, 3, sizeof(char*)); sconfig->trusted_proxy_headers = headers; } else headers = sconfig->trusted_proxy_headers; } while (*args) { const char **entry = NULL; entry = (const char **)apr_array_push(headers); *entry = wsgi_http2env(cmd->pool, ap_getword_conf(cmd->pool, &args)); } return NULL; } static int wsgi_looks_like_ip(const char *ip) { static const char ipv4_set[] = "0123456789./"; static const char ipv6_set[] = "0123456789abcdef:/"; const char *ptr; /* Zero length value is not valid. */ if (!*ip) return 0; /* Determine if this could be a IPv6 or IPv4 address. */ ptr = ip; if (strchr(ip, ':')) { while(*ptr && strchr(ipv6_set, *ptr) != NULL) ++ptr; } else { while(*ptr && strchr(ipv4_set, *ptr) != NULL) ++ptr; } return (*ptr == '\0'); } static const char *wsgi_set_trusted_proxies(cmd_parms *cmd, void *mconfig, const char *args) { apr_array_header_t *proxy_ips = NULL; if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (!dconfig->trusted_proxies) { proxy_ips = apr_array_make(cmd->pool, 3, sizeof(char*)); dconfig->trusted_proxies = proxy_ips; } else proxy_ips = dconfig->trusted_proxies; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->trusted_proxies) { proxy_ips = apr_array_make(cmd->pool, 3, sizeof(char*)); sconfig->trusted_proxies = proxy_ips; } else proxy_ips = sconfig->trusted_proxies; } while (*args) { const char *proxy_ip; proxy_ip = ap_getword_conf(cmd->pool, &args); if (wsgi_looks_like_ip(proxy_ip)) { char *ip; char *mask; apr_ipsubnet_t **sub; apr_status_t rv; ip = apr_pstrdup(cmd->temp_pool, proxy_ip); if ((mask = ap_strchr(ip, '/'))) *mask++ = '\0'; sub = (apr_ipsubnet_t **)apr_array_push(proxy_ips); rv = apr_ipsubnet_create(sub, ip, mask, cmd->pool); if (rv != APR_SUCCESS) { char msgbuf[128]; apr_strerror(rv, msgbuf, sizeof(msgbuf)); return apr_pstrcat(cmd->pool, "Unable to parse trusted " "proxy IP address/subnet of \"", proxy_ip, "\". 
", msgbuf, NULL); } } else { return apr_pstrcat(cmd->pool, "Unable to parse trusted proxy " "IP address/subnet of \"", proxy_ip, "\".", NULL); } } return NULL; } static const char *wsgi_set_enable_sendfile(cmd_parms *cmd, void *mconfig, const char *f) { if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->enable_sendfile = 0; else if (strcasecmp(f, "On") == 0) dconfig->enable_sendfile = 1; else return "WSGIEnableSendfile must be one of: Off | On"; } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->enable_sendfile = 0; else if (strcasecmp(f, "On") == 0) sconfig->enable_sendfile = 1; else return "WSGIEnableSendfile must be one of: Off | On"; } return NULL; } static const char *wsgi_set_access_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of access script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI access script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI access script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->access_script = object; wsgi_python_required = 1; return NULL; } static const char *wsgi_set_auth_user_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of auth user script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI auth user script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI auth user script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->auth_user_script = object; wsgi_python_required = 1; return NULL; } static const char *wsgi_set_auth_group_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIDirectoryConfig *dconfig = NULL; WSGIScriptFile *object = NULL; const char *option = NULL; const char *value = NULL; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of auth group script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI auth group script definition."; } if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else return "Invalid option to WSGI auth group script definition."; } dconfig = (WSGIDirectoryConfig *)mconfig; dconfig->auth_group_script = object; 
wsgi_python_required = 1; return NULL; } #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static const char *wsgi_set_user_authoritative(cmd_parms *cmd, void *mconfig, const char *f) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->user_authoritative = 0; else if (strcasecmp(f, "On") == 0) dconfig->user_authoritative = 1; else return "WSGIUserAuthoritative must be one of: Off | On"; return NULL; } #endif static const char *wsgi_set_group_authoritative(cmd_parms *cmd, void *mconfig, const char *f) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (strcasecmp(f, "Off") == 0) dconfig->group_authoritative = 0; else if (strcasecmp(f, "On") == 0) dconfig->group_authoritative = 1; else return "WSGIGroupAuthoritative must be one of: Off | On"; return NULL; } static const char *wsgi_add_handler_script(cmd_parms *cmd, void *mconfig, const char *args) { WSGIScriptFile *object = NULL; const char *name = NULL; const char *option = NULL; const char *value = NULL; name = ap_getword_conf(cmd->pool, &args); if (!name || !*name) return "Name for handler script not supplied."; object = newWSGIScriptFile(cmd->pool); object->handler_script = ap_getword_conf(cmd->pool, &args); if (!object->handler_script || !*object->handler_script) return "Location of handler script not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI handler script definition."; } if (!strcmp(option, "process-group")) { if (!*value) return "Invalid name for WSGI process group."; object->process_group = value; } else if (!strcmp(option, "application-group")) { if (!*value) return "Invalid name for WSGI application group."; object->application_group = value; } else if (!strcmp(option, "pass-authorization")) { if (!*value) return "Invalid value for authorization flag."; if (strcasecmp(value, "Off") == 0) object->pass_authorization = "0"; else if (strcasecmp(value, "On") == 0) object->pass_authorization = "1"; else return "Invalid value for authorization flag."; } else return "Invalid option to WSGI handler script definition."; } if (cmd->path) { WSGIDirectoryConfig *dconfig = NULL; dconfig = (WSGIDirectoryConfig *)mconfig; if (!dconfig->handler_scripts) dconfig->handler_scripts = apr_hash_make(cmd->pool); apr_hash_set(dconfig->handler_scripts, name, APR_HASH_KEY_STRING, object); } else { WSGIServerConfig *sconfig = NULL; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (!sconfig->handler_scripts) sconfig->handler_scripts = apr_hash_make(cmd->pool); apr_hash_set(sconfig->handler_scripts, name, APR_HASH_KEY_STRING, object); } return NULL; } static const char *wsgi_set_server_metrics(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->server_metrics = 0; else if (strcasecmp(f, "On") == 0) sconfig->server_metrics = 1; else return "WSGIServerMetrics must be one of: Off | On"; return NULL; } static const char *wsgi_set_newrelic_config_file( cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, 
&wsgi_module); sconfig->newrelic_config_file = f; return NULL; } static const char *wsgi_set_newrelic_environment( cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->newrelic_environment = f; return NULL; } /* Handler for the translate name phase. */ static long wsgi_alias_matches(const char *uri, const char *alias_fakename) { /* Code for this function from Apache mod_alias module. */ const char *aliasp = alias_fakename, *urip = uri; while (*aliasp) { if (*aliasp == '/') { /* any number of '/' in the alias matches any number in * the supplied URI, but there must be at least one... */ if (*urip != '/') return 0; do { ++aliasp; } while (*aliasp == '/'); do { ++urip; } while (*urip == '/'); } else { /* Other characters are compared literally */ if (*urip++ != *aliasp++) return 0; } } /* Check last alias path component matched all the way */ if (aliasp[-1] != '/' && *urip != '\0' && *urip != '/') return 0; /* Return number of characters from URI which matched (may be * greater than length of alias, since we may have matched * doubled slashes) */ return urip - uri; } static int wsgi_hook_intercept(request_rec *r) { WSGIServerConfig *config = NULL; apr_array_header_t *aliases = NULL; WSGIAliasEntry *entries = NULL; WSGIAliasEntry *entry = NULL; ap_regmatch_t matches[AP_MAX_REG_MATCH]; const char *location = NULL; const char *application = NULL; int i = 0; config = ap_get_module_config(r->server->module_config, &wsgi_module); if (!config->alias_list) return DECLINED; if (r->uri[0] != '/' && r->uri[0]) return DECLINED; aliases = config->alias_list; entries = (WSGIAliasEntry *)aliases->elts; for (i = 0; i < aliases->nelts; ++i) { long l = 0; entry = &entries[i]; if (entry->regexp) { if (!ap_regexec(entry->regexp, r->uri, AP_MAX_REG_MATCH, matches, 0)) { if (entry->application) { l = matches[0].rm_eo; location = apr_pstrndup(r->pool, r->uri, l); application = ap_pregsub(r->pool, entry->application, r->uri, AP_MAX_REG_MATCH, matches); } } } else if (entry->location) { l = wsgi_alias_matches(r->uri, entry->location); location = entry->location; application = entry->application; } if (l > 0) { if (!strcmp(location, "/")) { r->filename = apr_pstrcat(r->pool, application, r->uri, NULL); } else { r->filename = apr_pstrcat(r->pool, application, r->uri + l, NULL); } r->handler = "wsgi-script"; apr_table_setn(r->notes, "alias-forced-type", r->handler); if (entry->process_group) { apr_table_setn(r->notes, "mod_wsgi.process_group", entry->process_group); } if (entry->application_group) { apr_table_setn(r->notes, "mod_wsgi.application_group", entry->application_group); } if (entry->callable_object) { apr_table_setn(r->notes, "mod_wsgi.callable_object", entry->callable_object); } if (entry->pass_authorization == 0) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "0"); else if (entry->pass_authorization == 1) apr_table_setn(r->notes, "mod_wsgi.pass_authorization", "1"); return OK; } } return DECLINED; } /* Handler for the response handler phase. */ static void wsgi_drop_invalid_headers(request_rec *r); static void wsgi_process_proxy_headers(request_rec *r); static void wsgi_build_environment(request_rec *r) { WSGIRequestConfig *config = NULL; const char *value = NULL; const char *script_name = NULL; const char *path_info = NULL; conn_rec *c = r->connection; /* Grab request configuration. 
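 *
 * The remainder of this function builds up the per request CGI style
 * environment: invalid headers are dropped, the standard CGI
 * variables are added, a HEAD request may be remapped to GET, the
 * Authorization header is optionally passed through, duplicate
 * slashes in SCRIPT_NAME and PATH_INFO are collapsed, trusted proxy
 * headers are applied, and finally the internal mod_wsgi.* keys are
 * set.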
*/ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* * Remove any invalid headers which use invalid characters. * This is necessary to ensure that someone doesn't try and * take advantage of header spoofing. This can come about * where characters other than alphanumerics or '-' are used * as the conversion of non alphanumerics to '_' means one * can get collisions. This is technically only an issue * with Apache 2.2 as Apache 2.4 addresses the problem and * drops them anyway. Still go through and drop them even * for Apache 2.4 as it is not clear which version of Apache 2.4 * introduced the change. */ wsgi_drop_invalid_headers(r); /* Populate environment with standard CGI variables. */ ap_add_cgi_vars(r); ap_add_common_vars(r); /* * Mutate a HEAD request into a GET request. This is * required because the WSGI specification doesn't lay out * clearly how WSGI applications should treat a HEAD * request. Generally authors of WSGI applications or * frameworks take it that they do not need to return any * content, but this screws up any Apache output filters * which need to see all the response content in order to * correctly set up response headers for a HEAD request such * that they are the same as a GET request. Thus change a * HEAD request into a GET request to ensure that request * content is generated. If using Apache 2.X we can skip * doing this if we know there is no output filter that * might change the content and/or headers. * * The default behaviour here of changing it if an output * filter is detected can be overridden using the directive * WSGIMapHEADToGET. The default value is 'Auto'. If set to * 'On' then it is remapped regardless of whether an output * filter is present. If 'Off' then it will be left alone * and the original value used. */ if (config->map_head_to_get == 2) { if (r->method_number == M_GET && r->header_only && r->output_filters->frec->ftype < AP_FTYPE_PROTOCOL) apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET"); } else if (config->map_head_to_get == 1) { if (r->method_number == M_GET) apr_table_setn(r->subprocess_env, "REQUEST_METHOD", "GET"); } /* * If enabled, pass along authorisation headers which Apache * leaves out of the CGI environment. WSGI still needs to see * these if it needs to implement any of the standard * authentication schemes such as Basic and Digest. We do * not pass these through by default though as it can result * in passwords being leaked through to a WSGI application * when it shouldn't. This would be a problem where there is * some sort of site wide authorisation scheme in place * which has got nothing to do with specific applications. */ if (config->pass_authorization) { value = apr_table_get(r->headers_in, "Authorization"); if (value) apr_table_setn(r->subprocess_env, "HTTP_AUTHORIZATION", value); } /* If PATH_INFO not set, set it to an empty string. */ value = apr_table_get(r->subprocess_env, "PATH_INFO"); if (!value) apr_table_setn(r->subprocess_env, "PATH_INFO", ""); /* * Multiple slashes are not always collapsed into a single * slash in SCRIPT_NAME and PATH_INFO with Apache 1.3 and * Apache 2.X behaving a bit differently. Because some WSGI * applications don't deal with multiple slashes properly we * collapse any duplicate slashes to a single slash so * Apache behaviour is consistent across all versions. We * don't care that PATH_TRANSLATED can on Apache 1.3 still * contain multiple slashes as that should not be getting * used from a WSGI application anyway. 
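 *
 * As a concrete illustration (the URL is invented), for an
 * application mounted at /myapp a request for //myapp///admin//x
 * would, after the collapsing below, typically be presented with a
 * SCRIPT_NAME of /myapp and a PATH_INFO of /admin/x.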
*/ script_name = apr_table_get(r->subprocess_env, "SCRIPT_NAME"); if (*script_name == '/') { while (*script_name && (*(script_name+1) == '/')) script_name++; script_name = apr_pstrdup(r->pool, script_name); ap_no2slash((char*)script_name); apr_table_setn(r->subprocess_env, "SCRIPT_NAME", script_name); } path_info = apr_table_get(r->subprocess_env, "PATH_INFO"); if (*path_info == '/') { while (*path_info && (*(path_info+1) == '/')) path_info++; path_info = apr_pstrdup(r->pool, path_info); ap_no2slash((char*)path_info); apr_table_setn(r->subprocess_env, "PATH_INFO", path_info); } /* * Save away the SCRIPT_NAME and PATH_INFO values at this point * so we have a way of determining if they are rewritten somehow. * This can be important when dealing with rewrite rules and * a trusted header is being handled for SCRIPT_NAME. */ apr_table_setn(r->subprocess_env, "mod_wsgi.script_name", script_name); apr_table_setn(r->subprocess_env, "mod_wsgi.path_info", path_info); /* * Perform fixups on the environment based on trusted proxy headers * sent through from a front end proxy. */ wsgi_process_proxy_headers(r); /* * Determine whether the connection uses the HTTPS protocol. This has * to be done after any fixups due to trusted proxy headers. */ if (!wsgi_is_https) wsgi_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (wsgi_is_https && wsgi_is_https(r->connection)) apr_table_set(r->subprocess_env, "HTTPS", "1"); /* * Set values specific to mod_wsgi configuration. These control * aspects of how a request is managed but don't strictly need * to be passed through to the application itself. It is however * easier to set them here as they are then carried across to * the daemon process as part of the environment where they can * be extracted and used. */ apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); apr_table_setn(r->subprocess_env, "mod_wsgi.request_handler", r->handler); apr_table_setn(r->subprocess_env, "mod_wsgi.handler_script", config->handler_script); apr_table_setn(r->subprocess_env, "mod_wsgi.script_reloading", apr_psprintf(r->pool, "%d", config->script_reloading)); #if defined(MOD_WSGI_WITH_DAEMONS) apr_table_setn(r->subprocess_env, "mod_wsgi.listener_host", c->local_addr->hostname ? c->local_addr->hostname : ""); apr_table_setn(r->subprocess_env, "mod_wsgi.listener_port", apr_psprintf(r->pool, "%d", c->local_addr->port)); #endif apr_table_setn(r->subprocess_env, "mod_wsgi.enable_sendfile", apr_psprintf(r->pool, "%d", config->enable_sendfile)); apr_table_setn(r->subprocess_env, "mod_wsgi.ignore_activity", apr_psprintf(r->pool, "%d", config->ignore_activity)); apr_table_setn(r->subprocess_env, "mod_wsgi.request_start", apr_psprintf(r->pool, "%" APR_TIME_T_FMT, r->request_time)); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) if (!r->log_id) { const char **id; /* Need to cast const away. 
*/ id = &((request_rec *)r)->log_id; ap_run_generate_log_id(c, r, id); } if (r->log_id) apr_table_setn(r->subprocess_env, "mod_wsgi.request_id", r->log_id); if (r->connection->log_id) apr_table_setn(r->subprocess_env, "mod_wsgi.connection_id", r->connection->log_id); #endif } typedef struct { PyObject_HEAD request_rec *r; WSGIRequestConfig *config; PyObject *log; } DispatchObject; static DispatchObject *newDispatchObject(request_rec *r, WSGIRequestConfig *config) { DispatchObject *self; self = PyObject_New(DispatchObject, &Dispatch_Type); if (self == NULL) return NULL; self->config = config; self->r = r; self->log = newLogObject(r, APLOG_ERR, NULL, 0); return self; } static void Dispatch_dealloc(DispatchObject *self) { Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Dispatch_environ(DispatchObject *self, const char *group) { request_rec *r = NULL; PyObject *vars = NULL; PyObject *object = NULL; const apr_array_header_t *head = NULL; const apr_table_entry_t *elts = NULL; int i = 0; /* Create the WSGI environment dictionary. */ vars = PyDict_New(); /* Merge the CGI environment into the WSGI environment. */ r = self->r; head = apr_table_elts(r->subprocess_env); elts = (apr_table_entry_t *)head->elts; for (i = 0; i < head->nelts; ++i) { if (elts[i].key) { if (elts[i].val) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(elts[i].val, strlen(elts[i].val), NULL); #else object = PyString_FromString(elts[i].val); #endif PyDict_SetItemString(vars, elts[i].key, object); Py_DECREF(object); } else PyDict_SetItemString(vars, elts[i].key, Py_None); } } /* * Need to override process and application group as they * are set to the default target, where as we need to set * them to context dispatch script is run in. Also need * to remove callable object reference. */ #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString(""); #else object = PyString_FromString(""); #endif PyDict_SetItemString(vars, "mod_wsgi.process_group", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(group, strlen(group), NULL); #else object = PyString_FromString(group); #endif PyDict_SetItemString(vars, "mod_wsgi.application_group", object); Py_DECREF(object); PyDict_DelItemString(vars, "mod_wsgi.callable_object"); /* * Setup log object for WSGI errors. Don't decrement * reference to log object as keep reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* * If Apache extensions are enabled add a CObject reference * to the Apache request_rec structure instance. */ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7) object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use. 
*/ #if 0 object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); #endif return vars; } static PyObject *Dispatch_ssl_is_https(DispatchObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Dispatch_ssl_var_lookup(DispatchObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); Py_DECREF(item); return NULL; } Py_DECREF(item); item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_DECREF(item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_INCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); if (!value) { Py_INCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } static PyMethodDef Dispatch_methods[] = { { "ssl_is_https", (PyCFunction)Dispatch_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Dispatch_ssl_var_lookup, METH_VARARGS, 0 }, { NULL, NULL} }; static PyTypeObject Dispatch_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Dispatch", /*tp_name*/ sizeof(DispatchObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Dispatch_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Dispatch_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; static int wsgi_execute_dispatch(request_rec *r) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script = NULL; const char *group = NULL; int status; /* Grab request configuration. 
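 *
 * The dispatch script executed here is configured with a directive of
 * the form (the path shown is illustrative only):
 *
 *   WSGIDispatchScript /srv/myapp/dispatch.wsgi application-group=%{GLOBAL}
 *
 * The loaded module may define process_group(), application_group()
 * and callable_object() functions. Each is called with the request
 * environment and, if it returns a byte string, that value overrides
 * the corresponding configuration for this request.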
*/ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); if (!config->dispatch_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI dispatch " "script not provided.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->dispatch_script->handler_script; group = wsgi_server_group(r, config->dispatch_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for dispatch scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume everything will be okay for now. */ status = OK; /* Determine if script exists and execute it. */ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; DispatchObject *adapter = NULL; module_dict = PyModule_GetDict(module); adapter = newDispatchObject(r, config); if (adapter) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *method = NULL; vars = Dispatch_environ(adapter, group); /* First check process_group(). 
*/ #if defined(MOD_WSGI_WITH_DAEMONS) object = PyDict_GetItemString(module_dict, "process_group"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_process_group(r, s); config->process_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Process group must be " "a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_process_group(r, s); config->process_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.process_group", config->process_group); } } #endif else { PyErr_SetString(PyExc_TypeError, "Process " "group must be a byte string"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); } object = NULL; } #endif /* Now check application_group(). */ if (status == OK) object = PyDict_GetItemString(module_dict, "application_group"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_application_group(r, s); config->application_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Application group must " "be a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_application_group(r, s); config->application_group = s; apr_table_setn(r->subprocess_env, "mod_wsgi.application_group", config->application_group); } } #endif else { PyErr_SetString(PyExc_TypeError, "Application " "group must be a string " "object"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); } object = NULL; } /* Now check callable_object(). 
*/ if (status == OK) object = PyDict_GetItemString(module_dict, "callable_object"); if (object) { PyObject *result = NULL; if (adapter) { Py_INCREF(object); args = Py_BuildValue("(O)", vars); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); if (result) { if (result != Py_None) { if (PyString_Check(result)) { const char *s; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_callable_object(r, s); config->callable_object = s; apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Callable object must " "be a byte string, value " "containing non 'latin-1' " "characters found"); status = HTTP_INTERNAL_SERVER_ERROR; } else { const char *s; Py_DECREF(result); result = latin_item; s = PyString_AsString(result); s = apr_pstrdup(r->pool, s); s = wsgi_callable_object(r, s); config->callable_object = s; apr_table_setn(r->subprocess_env, "mod_wsgi.callable_object", config->callable_object); } } #endif else { PyErr_SetString(PyExc_TypeError, "Callable " "object must be a string " "object"); status = HTTP_INTERNAL_SERVER_ERROR; } } Py_DECREF(result); } else status = HTTP_INTERNAL_SERVER_ERROR; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); } object = NULL; } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { object = PyObject_CallObject(method, NULL); } Py_XDECREF(object); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_DECREF(vars); } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static int wsgi_is_script_aliased(request_rec *r) { const char *t = NULL; t = apr_table_get(r->notes, "alias-forced-type"); return t && (!strcasecmp(t, "wsgi-script")); } #if defined(MOD_WSGI_WITH_DAEMONS) static int wsgi_execute_remote(request_rec *r); #endif static int wsgi_hook_handler(request_rec *r) { int status; apr_off_t limit = 0; WSGIRequestConfig *config = NULL; const char *value = NULL; const char *tenc = NULL; const char *lenp = NULL; /* Filter out the obvious case of no handler defined. */ if (!r->handler) return DECLINED; /* * Construct request configuration and cache it in the * request object against this module so can access it later * from handler code. */ config = wsgi_create_req_config(r->pool, r); ap_set_module_config(r->request_config, &wsgi_module, config); /* * Only process requests for this module. First check for * where target is the actual WSGI script. Then need to * check for the case where handler name mapped to a handler * script definition. 
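 *
 * For the second case, a handler script is registered against a
 * handler name and then attached to requests through the normal
 * Apache handler mechanisms, for example (name and path invented):
 *
 *   WSGIHandlerScript my-handler /srv/handlers/my-handler.wsgi \
 *       process-group=%{GLOBAL} application-group=%{GLOBAL}
 *
 *   <FilesMatch "\.myext$">
 *   SetHandler my-handler
 *   </FilesMatch>
 *
 * The registered script is expected to provide a handle_request()
 * callable, which is why callable_object is forced to that name
 * below.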
*/ if (!strcmp(r->handler, "wsgi-script") || !strcmp(r->handler, "application/x-httpd-wsgi")) { /* * Ensure that have adequate privileges to run the WSGI * script. Require ExecCGI to be specified in Options for * this. In doing this, using the wider interpretation that * ExecCGI refers to any executable like script even though * not a separate process execution. */ if (!(ap_allow_options(r) & OPT_EXECCGI) && !wsgi_is_script_aliased(r)) { wsgi_log_script_error(r, "Options ExecCGI is off in this " "directory", r->filename); return HTTP_FORBIDDEN; } /* Ensure target script exists and is a file. */ if (r->finfo.filetype == 0) { wsgi_log_script_error(r, "Target WSGI script not found or unable " "to stat", r->filename); return HTTP_NOT_FOUND; } if (r->finfo.filetype == APR_DIR) { wsgi_log_script_error(r, "Attempt to invoke directory as WSGI " "application", r->filename); return HTTP_FORBIDDEN; } if (wsgi_is_script_aliased(r)) { /* * Allow any configuration supplied through request notes to * override respective values. Request notes are used when * configuration supplied with WSGIScriptAlias directives. */ if ((value = apr_table_get(r->notes, "mod_wsgi.process_group"))) config->process_group = wsgi_process_group(r, value); if ((value = apr_table_get(r->notes, "mod_wsgi.application_group"))) config->application_group = wsgi_application_group(r, value); if ((value = apr_table_get(r->notes, "mod_wsgi.callable_object"))) config->callable_object = value; if ((value = apr_table_get(r->notes, "mod_wsgi.pass_authorization"))) { if (!strcmp(value, "1")) config->pass_authorization = 1; else config->pass_authorization = 0; } } } #if 0 else if (strstr(r->handler, "wsgi-handler=") == r->handler) { config->handler_script = apr_pstrcat(r->pool, r->handler+13, NULL); config->callable_object = "handle_request"; } #endif else if (config->handler_scripts) { WSGIScriptFile *entry; entry = (WSGIScriptFile *)apr_hash_get(config->handler_scripts, r->handler, APR_HASH_KEY_STRING); if (entry) { config->handler_script = entry->handler_script; config->callable_object = "handle_request"; if ((value = entry->process_group)) config->process_group = wsgi_process_group(r, value); if ((value = entry->application_group)) config->application_group = wsgi_application_group(r, value); if ((value = entry->pass_authorization)) { if (!strcmp(value, "1")) config->pass_authorization = 1; else config->pass_authorization = 0; } } else return DECLINED; } else return DECLINED; /* * Honour AcceptPathInfo directive. Default behaviour is * accept additional path information. */ #if AP_MODULE_MAGIC_AT_LEAST(20011212,0) if ((r->used_path_info == AP_REQ_REJECT_PATH_INFO) && r->path_info && *r->path_info) { wsgi_log_script_error(r, "AcceptPathInfo off disallows user's path", r->filename); return HTTP_NOT_FOUND; } #endif /* * Setup policy to apply if request contains a body. Note that the * WSGI specification doesn't strictly allow for chunked request * content as CONTENT_LENGTH is required when reading input and * an application isn't meant to read more than what is defined by * CONTENT_LENGTH. We still optionally allow chunked request content. * For an application to use the content, it has to ignore the WSGI * specification and use read() with no arguments to read all * available input, or call read() with specific block size until * read() returns an empty string. */ tenc = apr_table_get(r->headers_in, "Transfer-Encoding"); if (tenc) { /* Only chunked transfer encoding is supported. 
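 *
 * Optional support for chunked request content has to be turned on
 * explicitly, for example:
 *
 *   WSGIChunkedRequest On
 *
 * Even then, as explained above, an application consuming such a
 * request has to keep calling read() until an empty string is
 * returned, since no CONTENT_LENGTH is available for a chunked
 * request.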
*/ if (strcasecmp(tenc, "chunked")) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Unexpected value for Transfer-Encoding of '%s' " "supplied. Only 'chunked' supported.", tenc), r->filename); return HTTP_NOT_IMPLEMENTED; } /* Only allow chunked requests when explicitly enabled. */ if (!config->chunked_request) { wsgi_log_script_error(r, "Received request requiring chunked " "transfer encoding, but optional support for chunked " "transfer encoding has not been enabled.", r->filename); return HTTP_LENGTH_REQUIRED; } /* * When chunked transfer encoding is specified, there should * not be any content length specified. */ if (lenp) { wsgi_log_script_error(r, "Unexpected Content-Length header " "supplied where Transfer-Encoding was specified " "as 'chunked'.", r->filename); return HTTP_BAD_REQUEST; } } /* * Check to see if the request content is too large if the * Content-Length header is defined then end the request here. We do * this as otherwise it will not be done until first time input data * is read in by the application. Problem is that underlying HTTP * output filter will also generate a 413 response and the error * raised from the application will be appended to that. The call to * ap_discard_request_body() is hopefully enough to trigger sending * of the 413 response by the HTTP filter. */ lenp = apr_table_get(r->headers_in, "Content-Length"); if (lenp) { char *endstr; apr_off_t length; if (wsgi_strtoff(&length, lenp, &endstr, 10) || *endstr || length < 0) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Invalid Content-Length header value of '%s' was " "supplied.", lenp), r->filename); return HTTP_BAD_REQUEST; } limit = ap_get_limit_req_body(r); if (limit && limit < length) { ap_discard_request_body(r); return OK; } } /* Build the sub process environment. */ config->request_start = r->request_time; wsgi_build_environment(r); /* * If a dispatch script has been provided, as appropriate * allow it to override any of the configuration related * to what context the script will be executed in and what * the target callable object for the application is. */ if (config->dispatch_script) { status = wsgi_execute_dispatch(r); if (status != OK) return status; } /* * Execute the target WSGI application script or proxy * request to one of the daemon processes as appropriate. */ #if defined(MOD_WSGI_WITH_DAEMONS) status = wsgi_execute_remote(r); if (status != DECLINED) return status; #endif #if defined(MOD_WSGI_DISABLE_EMBEDDED) wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled at compile " "time", r->filename); return HTTP_INTERNAL_SERVER_ERROR; #endif if (wsgi_server_config->restrict_embedded == 1) { wsgi_log_script_error(r, "Embedded mode of mod_wsgi disabled by " "runtime configuration", r->filename); return HTTP_INTERNAL_SERVER_ERROR; } return wsgi_execute_script(r); } /* * Apache 2.X and UNIX specific code for creation and management * of distinct daemon processes. 
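 *
 * In daemon mode a named process group is first defined and requests
 * are then delegated to it, typically with something like (the name
 * and settings are illustrative):
 *
 *   WSGIDaemonProcess myapp processes=2 threads=15
 *   WSGIProcessGroup myapp
 *
 * or by using the process-group option to WSGIScriptAlias shown
 * earlier. The parsing of the WSGIDaemonProcess directive follows.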
*/ #if defined(MOD_WSGI_WITH_DAEMONS) static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig, const char *args) { const char *name = NULL; const char *user = NULL; const char *group = NULL; int processes = 1; int multiprocess = 0; int threads = 15; long umask = -1; const char *root = NULL; const char *home = NULL; const char *lang = NULL; const char *locale = NULL; const char *python_home = NULL; const char *python_path = NULL; const char *python_eggs = NULL; int stack_size = 0; int maximum_requests = 0; int startup_timeout = 0; int shutdown_timeout = 5; int deadlock_timeout = 300; int inactivity_timeout = 0; int request_timeout = 0; int graceful_timeout = 15; int eviction_timeout = 0; int restart_interval = 0; int connect_timeout = 15; int socket_timeout = 0; int queue_timeout = 0; const char *socket_user = NULL; int listen_backlog = WSGI_LISTEN_BACKLOG; const char *display_name = NULL; int send_buffer_size = 0; int recv_buffer_size = 0; int header_buffer_size = 0; int response_buffer_size = 0; int response_socket_timeout = 0; const char *script_user = NULL; const char *script_group = NULL; int cpu_time_limit = 0; int cpu_priority = 0; apr_int64_t memory_limit = 0; apr_int64_t virtual_memory_limit = 0; uid_t uid; uid_t gid; const char *groups_list = NULL; int groups_count = 0; gid_t *groups = NULL; int server_metrics = 0; const char *newrelic_config_file = NULL; const char *newrelic_environment = NULL; const char *option = NULL; const char *value = NULL; WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; /* * Set the defaults for user/group from values * defined for the User/Group directives in main * Apache configuration. */ uid = ap_unixd_config.user_id; user = ap_unixd_config.user_name; gid = ap_unixd_config.group_id; /* Now parse options for directive. 
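 * The directive takes a process group name followed by name=value
 * options, each handled in the loop below. An illustrative example
 * (hypothetical values) exercising a few of them:
 *
 *   WSGIDaemonProcess myapp user=wwwrun group=wwwrun \
 *       processes=3 threads=25 maximum-requests=1000
 *
 * Options not given keep the defaults initialised above, notably the
 * user/group from the main Apache User/Group directives and 15
 * threads per process.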
*/ name = ap_getword_conf(cmd->pool, &args); if (!name || !*name) return "Name of WSGI daemon process not supplied."; while (*args) { if (wsgi_parse_option(cmd->pool, &args, &option, &value) != APR_SUCCESS) { return "Invalid option to WSGI daemon process definition."; } if (!strcmp(option, "user")) { if (!*value) return "Invalid user for WSGI daemon process."; user = value; uid = ap_uname2id(user); if (uid == 0) return "WSGI process blocked from running as root."; if (*user == '#') { struct passwd *entry = NULL; if ((entry = getpwuid(uid)) == NULL) return "Couldn't determine user name from uid."; user = entry->pw_name; } } else if (!strcmp(option, "group")) { if (!*value) return "Invalid group for WSGI daemon process."; group = value; gid = ap_gname2id(group); } else if (!strcmp(option, "supplementary-groups")) { groups_list = value; } else if (!strcmp(option, "processes")) { if (!*value) return "Invalid process count for WSGI daemon process."; processes = atoi(value); if (processes < 1) return "Invalid process count for WSGI daemon process."; multiprocess = 1; } else if (!strcmp(option, "threads")) { if (!*value) return "Invalid thread count for WSGI daemon process."; threads = atoi(value); if (threads < 0 || threads >= WSGI_STACK_LAST-1) return "Invalid thread count for WSGI daemon process."; } else if (!strcmp(option, "umask")) { if (!*value) return "Invalid umask for WSGI daemon process."; errno = 0; umask = strtol(value, (char **)&value, 8); if (*value || errno == ERANGE || umask < 0) return "Invalid umask for WSGI daemon process."; } else if (!strcmp(option, "chroot")) { if (geteuid()) return "Cannot chroot WSGI daemon process when not root."; if (*value != '/') return "Invalid chroot directory for WSGI daemon process."; root = value; } else if (!strcmp(option, "home")) { if (*value != '/') return "Invalid home directory for WSGI daemon process."; home = value; } else if (!strcmp(option, "lang")) { lang = value; } else if (!strcmp(option, "locale")) { locale = value; } else if (!strcmp(option, "python-home")) { python_home = value; } else if (!strcmp(option, "python-path")) { python_path = value; } else if (!strcmp(option, "python-eggs")) { python_eggs = value; } #if (APR_MAJOR_VERSION >= 1) else if (!strcmp(option, "stack-size")) { if (!*value) return "Invalid stack size for WSGI daemon process."; stack_size = atoi(value); if (stack_size <= 0) return "Invalid stack size for WSGI daemon process."; } #endif else if (!strcmp(option, "maximum-requests")) { if (!*value) return "Invalid request count for WSGI daemon process."; maximum_requests = atoi(value); if (maximum_requests < 0) return "Invalid request count for WSGI daemon process."; } else if (!strcmp(option, "startup-timeout")) { if (!*value) return "Invalid startup timeout for WSGI daemon process."; startup_timeout = atoi(value); if (startup_timeout < 0) return "Invalid startup timeout for WSGI daemon process."; } else if (!strcmp(option, "shutdown-timeout")) { if (!*value) return "Invalid shutdown timeout for WSGI daemon process."; shutdown_timeout = atoi(value); if (shutdown_timeout < 0) return "Invalid shutdown timeout for WSGI daemon process."; } else if (!strcmp(option, "deadlock-timeout")) { if (!*value) return "Invalid deadlock timeout for WSGI daemon process."; deadlock_timeout = atoi(value); if (deadlock_timeout < 0) return "Invalid deadlock timeout for WSGI daemon process."; } else if (!strcmp(option, "inactivity-timeout")) { if (!*value) return "Invalid inactivity timeout for WSGI daemon process."; inactivity_timeout = 
atoi(value); if (inactivity_timeout < 0) return "Invalid inactivity timeout for WSGI daemon process."; } else if (!strcmp(option, "request-timeout")) { if (!*value) return "Invalid request timeout for WSGI daemon process."; request_timeout = atoi(value); if (request_timeout < 0) return "Invalid request timeout for WSGI daemon process."; } else if (!strcmp(option, "graceful-timeout")) { if (!*value) return "Invalid graceful timeout for WSGI daemon process."; graceful_timeout = atoi(value); if (graceful_timeout < 0) return "Invalid graceful timeout for WSGI daemon process."; } else if (!strcmp(option, "eviction-timeout")) { if (!*value) return "Invalid eviction timeout for WSGI daemon process."; eviction_timeout = atoi(value); if (eviction_timeout < 0) return "Invalid eviction timeout for WSGI daemon process."; } else if (!strcmp(option, "restart-interval")) { if (!*value) return "Invalid restart interval for WSGI daemon process."; restart_interval = atoi(value); if (restart_interval < 0) return "Invalid restart interval for WSGI daemon process."; } else if (!strcmp(option, "connect-timeout")) { if (!*value) return "Invalid connect timeout for WSGI daemon process."; connect_timeout = atoi(value); if (connect_timeout < 0) return "Invalid connect timeout for WSGI daemon process."; } else if (!strcmp(option, "socket-timeout")) { if (!*value) return "Invalid socket timeout for WSGI daemon process."; socket_timeout = atoi(value); if (socket_timeout < 0) return "Invalid socket timeout for WSGI daemon process."; } else if (!strcmp(option, "queue-timeout")) { if (!*value) return "Invalid queue timeout for WSGI daemon process."; queue_timeout = atoi(value); if (queue_timeout < 0) return "Invalid queue timeout for WSGI daemon process."; } else if (!strcmp(option, "listen-backlog")) { if (!*value) return "Invalid listen backlog for WSGI daemon process."; listen_backlog = atoi(value); if (listen_backlog < 0) return "Invalid listen backlog for WSGI daemon process."; } else if (!strcmp(option, "display-name")) { display_name = value; } else if (!strcmp(option, "send-buffer-size")) { if (!*value) return "Invalid send buffer size for WSGI daemon process."; send_buffer_size = atoi(value); if (send_buffer_size < 512 && send_buffer_size != 0) { return "Send buffer size must be >= 512 bytes, " "or 0 for system default."; } } else if (!strcmp(option, "receive-buffer-size")) { if (!*value) return "Invalid receive buffer size for WSGI daemon process."; recv_buffer_size = atoi(value); if (recv_buffer_size < 512 && recv_buffer_size != 0) { return "Receive buffer size must be >= 512 bytes, " "or 0 for system default."; } } else if (!strcmp(option, "header-buffer-size")) { if (!*value) return "Invalid header buffer size for WSGI daemon process."; header_buffer_size = atoi(value); if (header_buffer_size < 8192 && header_buffer_size != 0) { return "Header buffer size must be >= 8192 bytes, " "or 0 for default."; } } else if (!strcmp(option, "response-buffer-size")) { if (!*value) return "Invalid response buffer size for WSGI daemon process."; response_buffer_size = atoi(value); if (response_buffer_size < 65536 && response_buffer_size != 0) { return "Response buffer size must be >= 65536 bytes, " "or 0 for default."; } } else if (!strcmp(option, "response-socket-timeout")) { if (!*value) return "Invalid response socket timeout for WSGI daemon process."; response_socket_timeout = atoi(value); if (response_socket_timeout < 0) return "Invalid response socket timeout for WSGI daemon process."; } else if (!strcmp(option, 
"socket-user")) { uid_t socket_uid; if (!*value) return "Invalid socket user for WSGI daemon process."; socket_uid = ap_uname2id(value); if (*value == '#') { struct passwd *entry = NULL; if ((entry = getpwuid(socket_uid)) == NULL) return "Couldn't determine user name from socket user."; value = entry->pw_name; } socket_user = value; } else if (!strcmp(option, "script-user")) { uid_t script_uid; if (!*value) return "Invalid script user for WSGI daemon process."; script_uid = ap_uname2id(value); if (*value == '#') { struct passwd *entry = NULL; if ((entry = getpwuid(script_uid)) == NULL) return "Couldn't determine uid from script user."; value = entry->pw_name; } script_user = value; } else if (!strcmp(option, "script-group")) { gid_t script_gid; if (!*value) return "Invalid script group for WSGI daemon process."; script_gid = ap_gname2id(value); if (*value == '#') { struct group *entry = NULL; if ((entry = getgrgid(script_gid)) == NULL) return "Couldn't determine gid from script group."; value = entry->gr_name; } script_group = value; } else if (!strcmp(option, "cpu-time-limit")) { if (!*value) return "Invalid CPU time limit for WSGI daemon process."; cpu_time_limit = atoi(value); if (cpu_time_limit < 0) return "Invalid CPU time limit for WSGI daemon process."; } else if (!strcmp(option, "cpu-priority")) { if (!*value) return "Invalid CPU priority for WSGI daemon process."; cpu_priority = atoi(value); } else if (!strcmp(option, "memory-limit")) { if (!*value) return "Invalid memory limit for WSGI daemon process."; memory_limit = apr_atoi64(value); if (memory_limit < 0) return "Invalid memory limit for WSGI daemon process."; } else if (!strcmp(option, "virtual-memory-limit")) { if (!*value) return "Invalid virtual memory limit for WSGI daemon process."; virtual_memory_limit = apr_atoi64(value); if (virtual_memory_limit < 0) return "Invalid virtual memory limit for WSGI daemon process."; } else if (!strcmp(option, "server-metrics")) { if (!*value) return "Invalid server metrics flag for WSGI daemon process."; if (strcasecmp(value, "Off") == 0) server_metrics = 0; else if (strcasecmp(value, "On") == 0) server_metrics = 1; else return "Invalid server metrics flag for WSGI daemon process."; } else if (!strcmp(option, "newrelic-config-file")) { newrelic_config_file = value; } else if (!strcmp(option, "newrelic-environment")) { newrelic_environment = value; } else return "Invalid option to WSGI daemon process definition."; } if (script_user && script_group) return "Only one of script-user and script-group allowed."; if (groups_list) { const char *group_name = NULL; long groups_maximum = NGROUPS_MAX; const char *items = NULL; #ifdef _SC_NGROUPS_MAX groups_maximum = sysconf(_SC_NGROUPS_MAX); if (groups_maximum < 0) groups_maximum = NGROUPS_MAX; #endif groups = (gid_t *)apr_pcalloc(cmd->pool, groups_maximum*sizeof(groups[0])); groups[groups_count++] = gid; items = groups_list; group_name = ap_getword(cmd->pool, &items, ','); while (group_name && *group_name) { if (groups_count >= groups_maximum) return "Too many supplementary groups WSGI daemon process"; groups[groups_count++] = ap_gname2id(group_name); group_name = ap_getword(cmd->pool, &items, ','); } } if (!wsgi_daemon_list) { wsgi_daemon_list = apr_array_make(cmd->pool, 20, sizeof(WSGIProcessGroup)); apr_pool_cleanup_register(cmd->pool, &wsgi_daemon_list, ap_pool_cleanup_set_null, apr_pool_cleanup_null); } entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if 
(!strcmp(entry->name, name)) return "Name duplicates previous WSGI daemon definition."; } wsgi_daemon_count++; entry = (WSGIProcessGroup *)apr_array_push(wsgi_daemon_list); entry->server = cmd->server; entry->random = random(); entry->id = wsgi_daemon_count; entry->name = apr_pstrdup(cmd->pool, name); entry->user = apr_pstrdup(cmd->pool, user); entry->group = apr_pstrdup(cmd->pool, group); entry->uid = uid; entry->gid = gid; entry->groups_list = groups_list; entry->groups_count = groups_count; entry->groups = groups; entry->processes = processes; entry->multiprocess = multiprocess; entry->threads = threads; entry->umask = umask; entry->root = root; entry->home = home; entry->lang = lang; entry->locale = locale; entry->python_home = python_home; entry->python_path = python_path; entry->python_eggs = python_eggs; entry->stack_size = stack_size; entry->maximum_requests = maximum_requests; entry->shutdown_timeout = shutdown_timeout; entry->startup_timeout = apr_time_from_sec(startup_timeout); entry->deadlock_timeout = apr_time_from_sec(deadlock_timeout); entry->inactivity_timeout = apr_time_from_sec(inactivity_timeout); entry->request_timeout = apr_time_from_sec(request_timeout); entry->graceful_timeout = apr_time_from_sec(graceful_timeout); entry->eviction_timeout = apr_time_from_sec(eviction_timeout); entry->restart_interval = apr_time_from_sec(restart_interval); entry->connect_timeout = apr_time_from_sec(connect_timeout); entry->socket_timeout = apr_time_from_sec(socket_timeout); entry->queue_timeout = apr_time_from_sec(queue_timeout); entry->socket_user = apr_pstrdup(cmd->pool, socket_user); entry->listen_backlog = listen_backlog; entry->display_name = display_name; entry->send_buffer_size = send_buffer_size; entry->recv_buffer_size = recv_buffer_size; entry->header_buffer_size = header_buffer_size; entry->response_buffer_size = response_buffer_size; if (response_socket_timeout == 0) response_socket_timeout = socket_timeout; entry->response_socket_timeout = apr_time_from_sec(response_socket_timeout); entry->script_user = script_user; entry->script_group = script_group; entry->cpu_time_limit = cpu_time_limit; entry->cpu_priority = cpu_priority; entry->memory_limit = memory_limit; entry->virtual_memory_limit = virtual_memory_limit; entry->server_metrics = server_metrics; entry->newrelic_config_file = newrelic_config_file; entry->newrelic_environment = newrelic_environment; entry->listener_fd = -1; return NULL; } static const char *wsgi_set_socket_prefix(cmd_parms *cmd, void *mconfig, const char *arg) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); sconfig->socket_prefix = ap_server_root_relative(cmd->pool, arg); if (!sconfig->socket_prefix) { return apr_pstrcat(cmd->pool, "Invalid WSGISocketPrefix '", arg, "'.", NULL); } return NULL; } static const char *wsgi_set_socket_rotation(cmd_parms *cmd, void *mconfig, const char *f) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); if (strcasecmp(f, "Off") == 0) sconfig->socket_rotation = 0; else if (strcasecmp(f, "On") == 0) sconfig->socket_rotation = 1; else return "WSGISocketRotation must be one of: Off | On"; return NULL; } static const char wsgi_valid_accept_mutex_string[] = "Valid accept mutex mechanisms for this 
platform are: default" #if APR_HAS_FLOCK_SERIALIZE ", flock" #endif #if APR_HAS_FCNTL_SERIALIZE ", fcntl" #endif #if APR_HAS_SYSVSEM_SERIALIZE ", sysvsem" #endif #if APR_HAS_POSIXSEM_SERIALIZE ", posixsem" #endif #if APR_HAS_PROC_PTHREAD_SERIALIZE ", pthread" #endif "."; static const char *wsgi_set_accept_mutex(cmd_parms *cmd, void *mconfig, const char *arg) { const char *error = NULL; WSGIServerConfig *sconfig = NULL; error = ap_check_cmd_context(cmd, GLOBAL_ONLY); if (error != NULL) return error; sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); #if !defined(AP_ACCEPT_MUTEX_TYPE) sconfig->lock_mechanism = ap_accept_lock_mech; #else sconfig->lock_mechanism = APR_LOCK_DEFAULT; #endif if (!strcasecmp(arg, "default")) { sconfig->lock_mechanism = APR_LOCK_DEFAULT; } #if APR_HAS_FLOCK_SERIALIZE else if (!strcasecmp(arg, "flock")) { sconfig->lock_mechanism = APR_LOCK_FLOCK; } #endif #if APR_HAS_FCNTL_SERIALIZE else if (!strcasecmp(arg, "fcntl")) { sconfig->lock_mechanism = APR_LOCK_FCNTL; } #endif #if APR_HAS_SYSVSEM_SERIALIZE else if (!strcasecmp(arg, "sysvsem")) { sconfig->lock_mechanism = APR_LOCK_SYSVSEM; } #endif #if APR_HAS_POSIXSEM_SERIALIZE else if (!strcasecmp(arg, "posixsem")) { sconfig->lock_mechanism = APR_LOCK_POSIXSEM; } #endif #if APR_HAS_PROC_PTHREAD_SERIALIZE else if (!strcasecmp(arg, "pthread")) { sconfig->lock_mechanism = APR_LOCK_PROC_PTHREAD; } #endif else { return apr_pstrcat(cmd->pool, "Accept mutex lock mechanism '", arg, "' is invalid. ", wsgi_valid_accept_mutex_string, NULL); } return NULL; } static apr_file_t *wsgi_signal_pipe_in = NULL; static apr_file_t *wsgi_signal_pipe_out = NULL; static void wsgi_signal_handler(int signum) { apr_size_t nbytes = 1; if (wsgi_daemon_pid != 0 && wsgi_daemon_pid != getpid()) exit(-1); if (signum == AP_SIG_GRACEFUL) { apr_file_write(wsgi_signal_pipe_out, "G", &nbytes); apr_file_flush(wsgi_signal_pipe_out); } else if (signum == SIGXCPU) { if (!wsgi_graceful_timeout) wsgi_daemon_shutdown++; apr_file_write(wsgi_signal_pipe_out, "C", &nbytes); apr_file_flush(wsgi_signal_pipe_out); } else { wsgi_daemon_shutdown++; apr_file_write(wsgi_signal_pipe_out, "S", &nbytes); apr_file_flush(wsgi_signal_pipe_out); } } static void wsgi_exit_daemon_process(int status) { if (wsgi_server && wsgi_daemon_group) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Exiting process '%s'.", getpid(), wsgi_daemon_group); } exit(status); } static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon); static void wsgi_manage_process(int reason, void *data, apr_wait_t status) { WSGIDaemonProcess *daemon = data; switch (reason) { /* Child daemon process has died. */ case APR_OC_REASON_DEATH: { int mpm_state; int stopping; /* * Determine if Apache is being shutdown or not and * if it is not being shutdown, we will need to * restart the child daemon process that has died. * If MPM doesn't support query assume that child * daemon process shouldn't be restarted. Both * prefork and worker MPMs support this query so * should always be okay. 
*/ stopping = 1; if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state) == APR_SUCCESS && mpm_state != AP_MPMQ_STOPPING) { stopping = 0; } if (!stopping) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' has died, deregister and " "restart it.", daemon->process.pid, daemon->group->name); if (WIFEXITED(status)) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' terminated normally, exit code %d", daemon->process.pid, daemon->group->name, WEXITSTATUS(status)); } else if (WIFSIGNALED(status)) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' terminated by signal %d", daemon->process.pid, daemon->group->name, WTERMSIG(status)); } } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' has died but server is " "being stopped, deregister it.", daemon->process.pid, daemon->group->name); } /* Deregister existing process so we stop watching it. */ apr_proc_other_child_unregister(daemon); /* Now restart process if not shutting down. */ if (!stopping) wsgi_start_process(wsgi_parent_pool, daemon); break; } /* Apache is being restarted or shutdown. */ case APR_OC_REASON_RESTART: { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' to be deregistered, as server is " "restarting or being shutdown.", daemon->process.pid, daemon->group->name); /* Deregister existing process so we stop watching it. */ apr_proc_other_child_unregister(daemon); break; } /* Child daemon process vanished. */ case APR_OC_REASON_LOST: { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' appears to have been lost, " "deregister and restart it.", daemon->process.pid, daemon->group->name); /* Deregister existing process so we stop watching it. */ apr_proc_other_child_unregister(daemon); /* Restart the child daemon process that has died. */ wsgi_start_process(wsgi_parent_pool, daemon); break; } /* Call to unregister the process. */ case APR_OC_REASON_UNREGISTER: { /* Nothing to do at present. */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' has been deregistered and will " "no longer be monitored.", daemon->process.pid, daemon->group->name); break; } default: { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Process '%s' targeted by unexpected event %d.", daemon->process.pid, daemon->group->name, reason); } } } static void wsgi_setup_daemon_name(WSGIDaemonProcess *daemon, apr_pool_t *p) { const char *display_name = NULL; #if !(defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) long slen = 0; long dlen = 0; char *argv0 = NULL; #endif display_name = daemon->group->display_name; if (!display_name) return; if (!strcmp(display_name, "%{GROUP}")) { display_name = apr_pstrcat(p, "(wsgi:", daemon->group->name, ")", NULL); } /* * Only argv[0] is guaranteed to be the real things as MPM * modules may make modifications to subsequent arguments. * Thus can only replace the argv[0] value. Because length * is restricted, need to truncate display name if too long. 
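 * As an illustrative example (hypothetical group name), configuring
 * 'display-name=%{GROUP}' on WSGIDaemonProcess for a group called
 * 'myapp' results in the process showing up in 'ps' output as
 * '(wsgi:myapp)', subject to the truncation described above on
 * platforms where argv[0] has to be overwritten in place.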
*/ #if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) setproctitle("%s", display_name); #else argv0 = (char*)wsgi_server->process->argv[0]; dlen = strlen(argv0); slen = strlen(display_name); memset(argv0, ' ', dlen); if (slen < dlen) memcpy(argv0, display_name, slen); else memcpy(argv0, display_name, dlen); #endif } static int wsgi_setup_access(WSGIDaemonProcess *daemon) { /* Change to chroot environment. */ if (daemon->group->root) { if (chroot(daemon->group->root) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to change root " "directory to '%s'.", getpid(), daemon->group->root); return -1; } } /* We don't need to switch user/group if not root. */ if (geteuid() == 0) { /* Setup the daemon process real and effective group. */ if (setgid(daemon->group->gid) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to set group id " "to gid=%u.", getpid(), (unsigned)daemon->group->gid); return -1; } else { if (daemon->group->groups) { if (setgroups(daemon->group->groups_count, daemon->group->groups) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable " "to set supplementary groups for uname=%s " "of '%s'.", getpid(), daemon->group->user, daemon->group->groups_list); return -1; } } else if (initgroups(daemon->group->user, daemon->group->gid) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable " "to set groups for uname=%s and gid=%u.", getpid(), daemon->group->user, (unsigned)daemon->group->gid); return -1; } } /* Setup the daemon process real and effective user. */ if (setuid(daemon->group->uid) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to change to uid=%ld.", getpid(), (long)daemon->group->uid); /* * On true UNIX systems this should always succeed at * this point. With certain Linux kernel versions though * we can get back EAGAIN where the target user had * reached their process limit. In that case will be left * running as wrong user. Just exit on all failures to be * safe. Don't die immediately to avoid a fork bomb. * * We could just return -1 here and let the caller do the * sleep() and exit() but this failure is critical enough * that we still do it here so it is obvious that the issue * is being addressed. */ ap_log_error(APLOG_MARK, APLOG_ALERT, 0, wsgi_server, "mod_wsgi (pid=%d): Failure to configure the " "daemon process correctly and process left in " "unspecified state. Restarting daemon process " "after delay.", getpid()); sleep(20); wsgi_exit_daemon_process(-1); return -1; } } /* * Setup the working directory for the process. It is either set to * what the 'home' option explicitly provides, or the home home * directory of the user, where it has been set to be different to * the user that Apache's own processes run as. 
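 * For example (hypothetical path), 'home=/srv/myapp' on the
 * WSGIDaemonProcess directive makes /srv/myapp the working directory
 * of the daemon process. When no 'home' option is given and the
 * daemon user differs from the Apache user, the code below falls back
 * to that user's home directory from the password database.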
*/ if (daemon->group->home) { if (chdir(daemon->group->home) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to change working " "directory to '%s'.", getpid(), daemon->group->home); return -1; } } else if (geteuid() != ap_unixd_config.user_id) { struct passwd *pwent; pwent = getpwuid(geteuid()); if (pwent) { if (chdir(pwent->pw_dir) == -1) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to change working " "directory to home directory '%s' for uid=%ld.", getpid(), pwent->pw_dir, (long)geteuid()); return -1; } } else { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Unable to determine home " "directory for uid=%ld.", getpid(), (long)geteuid()); return -1; } } /* Setup the umask for the effective user. */ if (daemon->group->umask != -1) umask(daemon->group->umask); /* * Linux prevents generation of core dumps after setuid() * has been used. Attempt to reenable ability to dump core * so that the CoreDumpDirectory directive still works. */ #if defined(HAVE_PRCTL) && defined(PR_SET_DUMPABLE) /* This applies to Linux 2.4 and later. */ if (ap_coredumpdir_configured) { if (prctl(PR_SET_DUMPABLE, 1)) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Set dumpable failed. This child " "will not coredump after software errors.", getpid()); } } #endif return 0; } static int wsgi_setup_socket(WSGIProcessGroup *process) { int sockfd = -1; struct sockaddr_un addr; mode_t omask; int rc; int sendsz = process->send_buffer_size; int recvsz = process->recv_buffer_size; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Socket for '%s' is '%s'.", getpid(), process->name, process->socket_path); if ((sockfd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't create unix domain " "socket.", getpid()); return -1; } #ifdef SO_SNDBUF if (sendsz) { if (setsockopt(sockfd, SOL_SOCKET, SO_SNDBUF, (void *)&sendsz, sizeof(sendsz)) == -1) { ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server, "mod_wsgi (pid=%d): Failed to set send buffer " "size on daemon process socket.", getpid()); } } #endif #ifdef SO_RCVBUF if (recvsz) { if (setsockopt(sockfd, SOL_SOCKET, SO_RCVBUF, (void *)&recvsz, sizeof(recvsz)) == -1) { ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server, "mod_wsgi (pid=%d): Failed to set receive buffer " "size on daemon process socket.", getpid()); } } #endif if (strlen(process->socket_path) > sizeof(addr.sun_path)) { ap_log_error(APLOG_MARK, APLOG_ALERT, 0, wsgi_server, "mod_wsgi (pid=%d): Length of path for daemon process " "socket exceeds maxmimum allowed value and will be " "truncated, resulting in likely failure to bind the " "socket, or other later related failure.", getpid()); } memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; apr_cpystrn(addr.sun_path, process->socket_path, sizeof(addr.sun_path)); omask = umask(0077); rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)); if (rc < 0 && errno == EADDRINUSE) { ap_log_error(APLOG_MARK, APLOG_WARNING, errno, wsgi_server, "mod_wsgi (pid=%d): Removing stale unix domain " "socket '%s'.", getpid(), process->socket_path); unlink(process->socket_path); rc = bind(sockfd, (struct sockaddr *)&addr, sizeof(addr)); } umask(omask); if (rc < 0) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't bind unix domain " "socket '%s'.", getpid(), process->socket_path); 
close(sockfd); return -1; } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Listen backlog for socket '%s' is '%d'.", getpid(), process->socket_path, process->listen_backlog); if (listen(sockfd, process->listen_backlog) < 0) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't listen on unix domain " "socket.", getpid()); close(sockfd); return -1; } /* * Set the ownership of the UNIX listener socket. This would * normally be the Apache user that the Apache server child * processes run as, as they are the only processes that * would connect to the sockets. In the case of ITK MPM, * having them owned by Apache user is useless as at the * time the request is to be proxied, the Apache server * child process will have uid corresponding to the user * whose request they are handling. For ITK, thus set the * ownership to be the same as the daemon processes. This is * still restrictive, in that can only connect to daemon * process group running under same user, but most of the * time that is what you would want anyway when using ITK * MPM. */ if (!geteuid()) { #if defined(MPM_ITK) || defined(ITK_MPM) uid_t socket_uid = process->uid; #else uid_t socket_uid = ap_unixd_config.user_id; #endif if (process->socket_user) socket_uid = ap_uname2id(process->socket_user); if (chown(process->socket_path, socket_uid, -1) < 0) { ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't change owner of unix " "domain socket '%s' to uid=%ld.", getpid(), process->socket_path, (long)socket_uid); close(sockfd); return -1; } } return sockfd; } static int wsgi_hook_daemon_handler(conn_rec *c); static void wsgi_process_socket(apr_pool_t *p, apr_socket_t *sock, apr_bucket_alloc_t *bucket_alloc, WSGIDaemonProcess *daemon) { apr_status_t rv; conn_rec *c; ap_sb_handle_t *sbh; core_net_rec *net; /* * This duplicates Apache connection setup. This is done * here rather than letting Apache do it so that avoid the * possibility that any Apache modules, such as mod_ssl * will add their own input/output filters to the chain. */ #if AP_MODULE_MAGIC_AT_LEAST(20110619,0) /* For 2.4 a NULL sbh pointer should work. */ sbh = NULL; #else /* For 2.2 a dummy sbh pointer is needed. 
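 * The sbh value is the scoreboard handle that Apache normally
 * allocates for each of its own workers. A daemon process has no
 * scoreboard slot of its own, so a NULL handle (2.4+) or a dummy
 * handle (2.2) is used purely to satisfy the connection structure.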
*/ ap_create_sb_handle(&sbh, p, -1, 0); #endif c = (conn_rec *)apr_pcalloc(p, sizeof(conn_rec)); c->sbh = sbh; c->conn_config = ap_create_conn_config(p); c->notes = apr_table_make(p, 5); c->pool = p; if ((rv = apr_socket_addr_get(&c->local_addr, APR_LOCAL, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_LOCAL).", getpid()); apr_socket_close(sock); return; } apr_sockaddr_ip_get(&c->local_ip, c->local_addr); #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if ((rv = apr_socket_addr_get(&c->client_addr, APR_REMOTE, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_REMOTE).", getpid()); apr_socket_close(sock); return; } c->client_ip = "unknown"; #else if ((rv = apr_socket_addr_get(&c->remote_addr, APR_REMOTE, sock)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_INFO, rv, wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_addr_get(APR_REMOTE).", getpid()); apr_socket_close(sock); return; } c->remote_ip = "unknown"; #endif c->base_server = daemon->group->server; c->bucket_alloc = bucket_alloc; c->id = 1; net = apr_palloc(c->pool, sizeof(core_net_rec)); if (daemon->group->socket_timeout) rv = apr_socket_timeout_set(sock, daemon->group->socket_timeout); else rv = apr_socket_timeout_set(sock, c->base_server->timeout); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_DEBUG, rv, wsgi_server, "mod_wsgi (pid=%d): Failed call " "apr_socket_timeout_set().", getpid()); } net->c = c; net->in_ctx = NULL; net->out_ctx = NULL; net->client_socket = sock; ap_set_module_config(net->c->conn_config, &core_module, sock); ap_add_input_filter_handle(ap_core_input_filter_handle, net, NULL, net->c); ap_add_output_filter_handle(ap_core_output_filter_handle, net, NULL, net->c); wsgi_hook_daemon_handler(c); ap_lingering_close(c); } static apr_status_t wsgi_worker_acquire(int id) { WSGIThreadStack *stack = wsgi_worker_stack; WSGIDaemonThread *thread = &wsgi_worker_threads[id]; while (1) { apr_uint32_t state = stack->state; if (state & (WSGI_STACK_TERMINATED | WSGI_STACK_NO_LISTENER)) { if (state & WSGI_STACK_TERMINATED) { return APR_EINVAL; } if (apr_atomic_cas32(&(stack->state), WSGI_STACK_LAST, state) != state) { continue; } else { return APR_SUCCESS; } } thread->next = state; if (apr_atomic_cas32(&(stack->state), (unsigned)id, state) != state) { continue; } else { apr_status_t rv; if (thread->wakeup) { thread->wakeup = 0; return APR_SUCCESS; } rv = apr_thread_cond_wait(thread->condition, thread->mutex); while (rv == APR_SUCCESS && !thread->wakeup) rv = apr_thread_cond_wait(thread->condition, thread->mutex); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): " "Wait on thread %d wakeup condition variable " "failed.", getpid(), id); } thread->wakeup = 0; return rv; } } } static apr_status_t wsgi_worker_release(void) { WSGIThreadStack *stack = wsgi_worker_stack; while (1) { apr_uint32_t state = stack->state; unsigned int first = state & WSGI_STACK_HEAD; if (first == WSGI_STACK_LAST) { if (apr_atomic_cas32(&(stack->state), state | WSGI_STACK_NO_LISTENER, state) != state) { continue; } else { return APR_SUCCESS; } } else { WSGIDaemonThread *thread = &wsgi_worker_threads[first]; if (apr_atomic_cas32(&(stack->state), (state ^ first) | thread->next, state) != state) { continue; } else { /* * Flag that thread should be woken up and then * signal it via the condition variable. 
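 * Idle worker threads are kept on a LIFO stack encoded in the atomic
 * 'state' word, so the thread woken here is the most recently parked
 * one. Once woken it goes on to compete for the accept mutex and poll
 * the listener socket in wsgi_daemon_worker().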
*/ apr_status_t rv; if ((rv = apr_thread_mutex_lock(thread->mutex)) != APR_SUCCESS) { return rv; } thread->wakeup = 1; if ((rv = apr_thread_mutex_unlock(thread->mutex)) != APR_SUCCESS) { return rv; } return apr_thread_cond_signal(thread->condition); } } } } static apr_status_t wsgi_worker_shutdown(void) { int i; apr_status_t rv; WSGIThreadStack *stack = wsgi_worker_stack; while (1) { apr_uint32_t state = stack->state; if (apr_atomic_cas32(&(stack->state), state | WSGI_STACK_TERMINATED, state) == state) { break; } } for (i = 0; i < wsgi_daemon_process->group->threads; i++) { if ((rv = wsgi_worker_release()) != APR_SUCCESS) { return rv; } } return APR_SUCCESS; } static void wsgi_daemon_worker(apr_pool_t *p, WSGIDaemonThread *thread) { apr_status_t status; apr_socket_t *socket; apr_pool_t *ptrans; apr_pollset_t *pollset; apr_pollfd_t pfd = { 0 }; apr_int32_t numdesc; const apr_pollfd_t *pdesc; apr_bucket_alloc_t *bucket_alloc; WSGIDaemonProcess *daemon = thread->process; WSGIProcessGroup *group = daemon->group; /* Loop until signal received to shutdown daemon process. */ while (!wsgi_daemon_shutdown) { apr_status_t rv; /* * Only allow one thread in this process to attempt to * acquire the global process lock as the global process * lock will actually allow all threads in this process * through once one in this process acquires lock. Only * allowing one means better chance of another process * subsequently getting it thereby distributing requests * across processes better and reducing chance of Python * GIL contention. */ wsgi_worker_acquire(thread->id); if (wsgi_daemon_shutdown) break; if (group->mutex) { /* * Grab the accept mutex across all daemon processes * in this process group. */ rv = apr_proc_mutex_lock(group->mutex); if (rv != APR_SUCCESS) { #if 0 #if defined(EIDRM) /* * When using multiple threads locking the * process accept mutex fails with an EIDRM when * process being shutdown but signal check * hasn't triggered quick enough to set shutdown * flag. This causes lots of error messages to * be logged which make it look like something * nasty has happened even when it hasn't. For * now assume that if multiple threads and EIDRM * occurs that it is okay and the process is * being shutdown. The condition should by * rights only occur when the Apache parent * process is being shutdown or has died for * some reason so daemon process would logically * therefore also be in process of being * shutdown or killed. */ if (!strcmp(apr_proc_mutex_name(group->mutex), "sysvsem")) { if (errno == EIDRM && group->threads > 1) wsgi_daemon_shutdown = 1; } #endif #endif if (!wsgi_daemon_shutdown) { ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): " "Couldn't acquire accept mutex '%s'. " "Shutting down daemon process.", getpid(), group->socket_path); wsgi_daemon_shutdown++; kill(getpid(), SIGTERM); sleep(5); } break; } /* * Daemon process being shutdown so don't accept the * connection after all. */ if (wsgi_daemon_shutdown) { apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); break; } } apr_pool_create(&ptrans, p); /* * Accept socket connection from the child process. We * test the socket for whether it is ready before actually * performing the accept() so that can know for sure that * we will be processing a request and flag thread as * running. Only bother to do join with thread which is * actually running when process is being shutdown. 
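 * Once a connection has been accepted further down, thread->request
 * is stamped with the current time under wsgi_monitor_lock and reset
 * to zero when the request completes; the monitor thread uses these
 * timestamps when enforcing any configured request-timeout.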
*/ apr_pollset_create(&pollset, 1, ptrans, 0); memset(&pfd, '\0', sizeof(pfd)); pfd.desc_type = APR_POLL_SOCKET; pfd.desc.s = daemon->listener; pfd.reqevents = APR_POLLIN; pfd.client_data = daemon; apr_pollset_add(pollset, &pfd); rv = apr_pollset_poll(pollset, -1, &numdesc, &pdesc); if (rv != APR_SUCCESS && !APR_STATUS_IS_EINTR(rv)) { ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): " "Unable to poll daemon socket for '%s'. " "Shutting down daemon process.", getpid(), group->socket_path); wsgi_daemon_shutdown++; kill(getpid(), SIGTERM); sleep(5); break; } if (wsgi_daemon_shutdown) { if (group->mutex) apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); apr_pool_destroy(ptrans); break; } if (rv != APR_SUCCESS && APR_STATUS_IS_EINTR(rv)) { if (group->mutex) apr_proc_mutex_unlock(group->mutex); wsgi_worker_release(); apr_pool_destroy(ptrans); continue; } thread->running = 1; status = apr_socket_accept(&socket, daemon->listener, ptrans); if (group->mutex) { apr_status_t rv; rv = apr_proc_mutex_unlock(group->mutex); if (rv != APR_SUCCESS) { if (!wsgi_daemon_shutdown) { wsgi_worker_release(); ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): " "Couldn't release accept mutex '%s'.", getpid(), group->socket_path); apr_pool_destroy(ptrans); thread->running = 0; break; } } } wsgi_worker_release(); if (status != APR_SUCCESS && APR_STATUS_IS_EINTR(status)) { apr_pool_destroy(ptrans); thread->running = 0; continue; } /* Process the request proxied from the child process. */ apr_thread_mutex_lock(wsgi_monitor_lock); thread->request = apr_time_now(); apr_thread_mutex_unlock(wsgi_monitor_lock); bucket_alloc = apr_bucket_alloc_create(ptrans); wsgi_process_socket(ptrans, socket, bucket_alloc, daemon); apr_thread_mutex_lock(wsgi_monitor_lock); thread->request = 0; apr_thread_mutex_unlock(wsgi_monitor_lock); /* Cleanup ready for next request. */ apr_pool_destroy(ptrans); thread->running = 0; /* Check to see if maximum number of requests reached. */ if (daemon->group->maximum_requests) { if (--wsgi_request_count <= 0) { if (wsgi_graceful_timeout && wsgi_active_requests) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Maximum requests " "reached, attempt a graceful shutdown " "'%s'.", getpid(), daemon->group->name); apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_graceful_shutdown_time = apr_time_now(); wsgi_graceful_shutdown_time += wsgi_graceful_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); } else { if (!wsgi_daemon_shutdown) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Maximum requests " "reached, triggering immediate shutdown " "'%s'.", getpid(), daemon->group->name); } wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } } } /* Check if graceful shutdown and no active requests. 
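 * If a graceful restart was initiated above, for instance because
 * maximum-requests was reached, the check below converts it into an
 * immediate shutdown as soon as the last active request finishes. An
 * illustrative configuration (hypothetical values) combining the two
 * options:
 *
 *   WSGIDaemonProcess myapp maximum-requests=1000 graceful-timeout=30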
*/ if (wsgi_daemon_graceful && !wsgi_daemon_shutdown) { if (wsgi_active_requests == 0) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Requests have completed, " "triggering immediate shutdown '%s'.", getpid(), daemon->group->name); wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } } } if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Exiting thread %d in daemon " "process '%s'.", getpid(), thread->id, thread->process->group->name); } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Exiting thread %d in daemon " "process '%s'.", getpid(), thread->id, thread->process->group->name); } } static void *wsgi_daemon_thread(apr_thread_t *thd, void *data) { WSGIDaemonThread *thread = data; apr_pool_t *p = apr_thread_pool_get(thd); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Started thread %d in daemon " "process '%s'.", getpid(), thread->id, thread->process->group->name); } else { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Started thread %d in daemon " "process '%s'.", getpid(), thread->id, thread->process->group->name); } apr_thread_mutex_lock(thread->mutex); wsgi_daemon_worker(p, thread); apr_thread_exit(thd, APR_SUCCESS); return NULL; } static void *wsgi_reaper_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; sleep(daemon->group->shutdown_timeout); ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Aborting process '%s'.", getpid(), daemon->group->name); wsgi_exit_daemon_process(-1); return NULL; } static void *wsgi_deadlock_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; PyGILState_STATE gilstate; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Enable deadlock thread in " "process '%s'.", getpid(), daemon->group->name); } apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_deadlock_shutdown_time = apr_time_now(); wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); while (1) { apr_sleep(apr_time_from_sec(1)); apr_thread_mutex_lock(wsgi_shutdown_lock); if (!wsgi_daemon_shutdown) { gilstate = PyGILState_Ensure(); PyGILState_Release(gilstate); } apr_thread_mutex_unlock(wsgi_shutdown_lock); apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_deadlock_shutdown_time = apr_time_now(); wsgi_deadlock_shutdown_time += wsgi_deadlock_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); } return NULL; } static void *wsgi_monitor_thread(apr_thread_t *thd, void *data) { WSGIDaemonProcess *daemon = data; WSGIProcessGroup *group = daemon->group; int restart = 0; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Enable monitor thread in " "process '%s'.", getpid(), group->name); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Startup timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_startup_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Deadlock timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_deadlock_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Idle inactivity timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_idle_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Request time limit is %d.", 
getpid(), (int)(apr_time_sec(wsgi_request_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Graceful timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_graceful_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Eviction timeout is %d.", getpid(), (int)(apr_time_sec(wsgi_eviction_timeout))); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Restart interval is %d.", getpid(), (int)(apr_time_sec(wsgi_restart_interval))); } /* * If a restart interval was specified then set up the time for * when the restart should occur. */ if (wsgi_restart_interval) { wsgi_restart_shutdown_time = apr_time_now(); wsgi_restart_shutdown_time += wsgi_restart_interval; } while (1) { apr_time_t now; apr_time_t startup_time; apr_time_t deadlock_time; apr_time_t idle_time; apr_time_t graceful_time; apr_time_t restart_time; apr_time_t request_time = 0; apr_interval_time_t period = 0; int i = 0; now = apr_time_now(); apr_thread_mutex_lock(wsgi_monitor_lock); startup_time = wsgi_startup_shutdown_time; deadlock_time = wsgi_deadlock_shutdown_time; idle_time = wsgi_idle_shutdown_time; graceful_time = wsgi_graceful_shutdown_time; restart_time = wsgi_restart_shutdown_time; if (wsgi_request_timeout && wsgi_worker_threads) { for (i = 0; i<wsgi_daemon_process->group->threads; i++) { if (wsgi_worker_threads[i].request) request_time += (now - wsgi_worker_threads[i].request); } } request_time /= wsgi_daemon_process->group->threads; apr_thread_mutex_unlock(wsgi_monitor_lock); if (!restart && wsgi_request_timeout) { if (request_time > wsgi_request_timeout) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Daemon process request " "time limit exceeded, stopping process " "'%s'.", getpid(), group->name); wsgi_shutdown_reason = "request_timeout"; wsgi_dump_stack_traces = 1; restart = 1; } } if (!restart && wsgi_startup_timeout) { if (startup_time > 0) { if (startup_time <= now) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Application startup " "timer expired, stopping process '%s'.", getpid(), group->name); wsgi_shutdown_reason = "startup_timeout"; restart = 1; } else { period = startup_time - now; } } } if (!restart && wsgi_restart_interval) { if (restart_time > 0) { if (restart_time <= now) { if (!wsgi_daemon_graceful) { if (wsgi_active_requests) { wsgi_daemon_graceful++; apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_graceful_shutdown_time = apr_time_now(); wsgi_graceful_shutdown_time += wsgi_graceful_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Application restart timer expired, " "waiting for requests to complete " "'%s'.", getpid(), daemon->group->name); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): " "Application restart timer expired, " "stopping process '%s'.", getpid(), daemon->group->name); wsgi_shutdown_reason = "restart_interval"; restart = 1; } } } else { if (!period || ((restart_time - now) < period)) period = restart_time - now; } } } if (!restart && wsgi_deadlock_timeout) { if (deadlock_time) { if (deadlock_time <= now) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Daemon process deadlock " "timer expired, stopping process '%s'.", getpid(), group->name); restart = 1; } else { if (!period || ((deadlock_time - now) < period)) period = deadlock_time - now; } } else { if (!period || (wsgi_deadlock_timeout < period)) period = wsgi_deadlock_timeout; }
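/*
 * Note (illustrative only, hypothetical values): each timer consulted
 * in this loop corresponds to an option of the WSGIDaemonProcess
 * directive, for example:
 *
 *   WSGIDaemonProcess myapp request-timeout=60 deadlock-timeout=300 \
 *       inactivity-timeout=600 graceful-timeout=15 restart-interval=86400
 *
 * Because request_time above is averaged across all worker threads, a
 * single stuck request in a daemon process with many threads can take
 * noticeably longer than request-timeout to trigger a restart.
 */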
} if (!restart && wsgi_idle_timeout) { if (idle_time) { if (idle_time <= now) { if (wsgi_active_requests == 0) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Daemon process " "idle inactivity timer expired, " "stopping process '%s'.", getpid(), group->name); wsgi_shutdown_reason = "inactivity_timeout"; restart = 1; } else { /* Ignore for now as still have requests. */ if (!period || (wsgi_idle_timeout < period)) period = wsgi_idle_timeout; } } else { if (!period || ((idle_time - now) < period)) period = idle_time - now; } } else { if (!period || (wsgi_idle_timeout < period)) period = wsgi_idle_timeout; } } if (!restart && wsgi_graceful_timeout) { if (graceful_time) { if (graceful_time <= now) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Daemon process " "graceful timer expired '%s'.", getpid(), group->name); restart = 1; } else { if (!period || ((graceful_time - now) < period)) period = graceful_time - now; else if (wsgi_graceful_timeout < period) period = wsgi_graceful_timeout; } } else { if (!period || (wsgi_graceful_timeout < period)) period = wsgi_graceful_timeout; } } if (!restart && wsgi_eviction_timeout) { if (graceful_time) { if (graceful_time <= now) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Daemon process " "graceful timer expired '%s'.", getpid(), group->name); restart = 1; } else { if (!period || ((graceful_time - now) < period)) period = graceful_time - now; else if (wsgi_eviction_timeout < period) period = wsgi_eviction_timeout; } } else { if (!period || (wsgi_eviction_timeout < period)) period = wsgi_eviction_timeout; } } if (restart) { wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } if (restart || wsgi_request_timeout || period <= 0 || (wsgi_startup_timeout && !wsgi_startup_shutdown_time)) { period = apr_time_from_sec(1); } apr_sleep(period); } return NULL; } #if (PY_MAJOR_VERSION >= 3) || (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 5) static void wsgi_log_stack_traces(void) { PyGILState_STATE state; PyObject *threads = NULL; /* * This should only be called on shutdown so don't try and log * any errors, just dump them straight out. 
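 * In particular, when the monitor thread stops a daemon process
 * because request-timeout was exceeded it also sets
 * wsgi_dump_stack_traces, so the traces logged here show where each
 * Python thread was blocked at the time, which is usually the
 * quickest way of working out what a hung request was doing.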
*/ state = PyGILState_Ensure(); threads = _PyThread_CurrentFrames(); if (threads && PyDict_Size(threads) != 0) { PyObject *seq = NULL; seq = PyObject_GetIter(threads); if (seq) { PyObject *id = NULL; PyObject *frame = NULL; Py_ssize_t i = 0; ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Dumping stack trace for " "active Python threads.", getpid()); while (PyDict_Next(threads, &i, &id, &frame)) { apr_int64_t thread_id = 0; PyFrameObject *current = NULL; thread_id = PyLong_AsLong(id); current = (PyFrameObject *)frame; while (current) { int lineno; const char *filename = NULL; const char *name = NULL; #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 9) lineno = PyFrame_GetLineNumber(current); #else if (current->f_trace) { lineno = current->f_lineno; } else { lineno = PyCode_Addr2Line(current->f_code, current->f_lasti); } #endif #if PY_MAJOR_VERSION >= 3 #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 9) filename = PyUnicode_AsUTF8(PyFrame_GetCode(current)->co_filename); name = PyUnicode_AsUTF8(PyFrame_GetCode(current)->co_name); #else filename = PyUnicode_AsUTF8(current->f_code->co_filename); name = PyUnicode_AsUTF8(current->f_code->co_name); #endif #else filename = PyString_AsString(current->f_code->co_filename); name = PyString_AsString(current->f_code->co_name); #endif if (current == (PyFrameObject *)frame) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Thread %" APR_INT64_T_FMT " executing file \"%s\", line %d, in %s", getpid(), thread_id, filename, lineno, name); } else { #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 9) if (PyFrame_GetBack(current)) { #else if (current->f_back) { #endif ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): called from file " "\"%s\", line %d, in %s,", getpid(), filename, lineno, name); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): called from file " "\"%s\", line %d, in %s.", getpid(), filename, lineno, name); } } #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 9) current = PyFrame_GetBack(current); #else current = current->f_back; #endif } } } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Failed to iterate over " "current frames for active threads.", getpid()); PyErr_Print(); PyErr_Clear(); } } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Failed to get current frames " "for active threads.", getpid()); PyErr_Print(); PyErr_Clear(); } Py_XDECREF(threads); PyGILState_Release(state); } #endif static void wsgi_daemon_main(apr_pool_t *p, WSGIDaemonProcess *daemon) { apr_threadattr_t *thread_attr; apr_thread_t *reaper = NULL; int i; apr_status_t rv; apr_status_t thread_rv; apr_pollfd_t poll_fd; apr_int32_t poll_count = 0; /* * Setup poll object for listening for shutdown notice from * signal handler. */ poll_fd.desc_type = APR_POLL_FILE; poll_fd.reqevents = APR_POLLIN; poll_fd.desc.f = wsgi_signal_pipe_in; /* Initialise maximum request count for daemon. */ if (daemon->group->maximum_requests) wsgi_request_count = daemon->group->maximum_requests; /* Ensure that threads are joinable. */ apr_threadattr_create(&thread_attr, p); apr_threadattr_detach_set(thread_attr, 0); #if (APR_MAJOR_VERSION >= 1) if (daemon->group->stack_size) { apr_threadattr_stacksize_set(thread_attr, daemon->group->stack_size); } #endif /* Start monitoring thread if required. 
*/ wsgi_startup_timeout = daemon->group->startup_timeout; wsgi_deadlock_timeout = daemon->group->deadlock_timeout; wsgi_idle_timeout = daemon->group->inactivity_timeout; wsgi_request_timeout = daemon->group->request_timeout; wsgi_graceful_timeout = daemon->group->graceful_timeout; wsgi_eviction_timeout = daemon->group->eviction_timeout; wsgi_restart_interval = daemon->group->restart_interval; if (wsgi_deadlock_timeout || wsgi_idle_timeout) { rv = apr_thread_create(&reaper, thread_attr, wsgi_monitor_thread, daemon, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create monitor " "thread in daemon process '%s'.", getpid(), daemon->group->name); } } if (wsgi_deadlock_timeout) { if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create deadlock " "thread in daemon process '%s'.", getpid(), daemon->group->name); } rv = apr_thread_create(&reaper, thread_attr, wsgi_deadlock_thread, daemon, p); } /* Initialise worker stack. */ wsgi_worker_stack = (WSGIThreadStack *)apr_palloc(p, sizeof(WSGIThreadStack)); wsgi_worker_stack->state = WSGI_STACK_NO_LISTENER | WSGI_STACK_LAST; /* Start the required number of threads. */ wsgi_worker_threads = (WSGIDaemonThread *)apr_pcalloc(p, daemon->group->threads * sizeof(WSGIDaemonThread)); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Starting %d threads in daemon " "process '%s'.", getpid(), daemon->group->threads, daemon->group->name); } for (i=0; i<daemon->group->threads; i++) { WSGIDaemonThread *thread = &wsgi_worker_threads[i]; if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Starting thread %d in daemon " "process '%s'.", getpid(), i+1, daemon->group->name); } /* Create the mutex and condition variable for this thread. */ rv = apr_thread_cond_create(&thread->condition, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d state condition variable in daemon " "process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads. */ kill(getpid(), SIGTERM); sleep(5); } rv = apr_thread_mutex_create(&thread->mutex, APR_THREAD_MUTEX_DEFAULT, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d state mutex variable in daemon " "process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads. */ kill(getpid(), SIGTERM); sleep(5); } /* Now create the actual thread. */ thread->id = i; thread->process = daemon; thread->running = 0; thread->request = 0; rv = apr_thread_create(&thread->thread, thread_attr, wsgi_daemon_thread, thread, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create worker " "thread %d in daemon process '%s'.", getpid(), i, daemon->group->name); /* * Try to force an exit of the process if fail * to create the worker threads. */ kill(getpid(), SIGTERM); sleep(5); } } /* Block until we get a process shutdown signal.
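 * The signal handler communicates with this loop through a pipe,
 * writing a single byte per event: 'G' for a graceful restart
 * request, 'C' when the CPU time limit (SIGXCPU) is reached, and a
 * plain shutdown byte for anything else. That is why the loop below
 * only ever needs to poll the pipe and read one byte at a time.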
*/ while (1) { char buf[1]; apr_size_t nbytes = 1; rv = apr_poll(&poll_fd, 1, &poll_count, -1); if (APR_STATUS_IS_EINTR(rv)) continue; rv = apr_file_read(wsgi_signal_pipe_in, buf, &nbytes); if (rv != APR_SUCCESS || nbytes != 1) { ap_log_error(APLOG_MARK, APLOG_ALERT, 0, wsgi_server, "mod_wsgi (pid=%d): Failed read on signal pipe '%s'.", getpid(), daemon->group->name); break; } if (buf[0] == 'C') { if (!wsgi_daemon_graceful) { wsgi_shutdown_reason = "cpu_time_limit"; if (wsgi_active_requests) { wsgi_daemon_graceful++; apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_graceful_shutdown_time = apr_time_now(); wsgi_graceful_shutdown_time += wsgi_graceful_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Exceeded CPU time " "limit, waiting for requests to complete " "'%s'.", getpid(), daemon->group->name); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Exceeded CPU time " "limit, triggering immediate shutdown " "'%s'.", getpid(), daemon->group->name); wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } } } else if (buf[0] == 'G') { if (!wsgi_daemon_graceful) { wsgi_shutdown_reason = "graceful_signal"; if (wsgi_active_requests) { wsgi_daemon_graceful++; apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_graceful_shutdown_time = apr_time_now(); if (wsgi_eviction_timeout) wsgi_graceful_shutdown_time += wsgi_eviction_timeout; else wsgi_graceful_shutdown_time += wsgi_graceful_timeout; apr_thread_mutex_unlock(wsgi_monitor_lock); ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Process eviction " "requested, waiting for requests to complete " "'%s'.", getpid(), daemon->group->name); } else { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Process eviction " "requested, triggering immediate shutdown " "'%s'.", getpid(), daemon->group->name); wsgi_daemon_shutdown++; kill(getpid(), SIGINT); } } } else break; } ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Shutdown requested '%s'.", getpid(), daemon->group->name); /* * Create a reaper thread to abort process if graceful * shutdown takes too long. Not recommended to disable * this unless external process is controlling shutdown. */ if (daemon->group->shutdown_timeout) { rv = apr_thread_create(&reaper, thread_attr, wsgi_reaper_thread, daemon, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Couldn't create reaper " "thread in daemon process '%s'.", getpid(), daemon->group->name); } } /* * If shutting down process due to reaching request time * limit, then try and dump out stack traces of any threads * which are running as a debugging aid. */ wsgi_publish_process_stopping(wsgi_shutdown_reason); #if (PY_MAJOR_VERSION >= 3) || (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 5) if (wsgi_dump_stack_traces) wsgi_log_stack_traces(); #endif /* * Attempt a graceful shutdown by waiting for any * threads which were processing a request at the time * of shutdown. In some respects this is a bit pointless * as even though we allow the requests to be completed, * the Apache child process which proxied the request * through to this daemon process could get killed off * before the daemon process and so the response gets * cut off or lost. 
 */

    wsgi_worker_shutdown();

    for (i = 0; i < daemon->group->threads; i++) {
        if (wsgi_worker_threads[i].thread && wsgi_worker_threads[i].running) {
            rv = apr_thread_join(&thread_rv, wsgi_worker_threads[i].thread);

            if (rv != APR_SUCCESS) {
                ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server,
                             "mod_wsgi (pid=%d): Couldn't join with "
                             "worker thread %d in daemon process '%s'.",
                             getpid(), i, daemon->group->name);
            }
        }
    }
}

static apr_status_t wsgi_cleanup_process(void *data)
{
    WSGIProcessGroup *group = (WSGIProcessGroup *)data;

    /* Only do cleanup if in Apache parent process. */

    if (wsgi_parent_pid != getpid())
        return APR_SUCCESS;

    if (group->listener_fd != -1) {
        if (close(group->listener_fd) < 0) {
            ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
                         "mod_wsgi (pid=%d): "
                         "Couldn't close unix domain socket '%s'.",
                         getpid(), group->socket_path);
        }

        if (unlink(group->socket_path) < 0 && errno != ENOENT) {
            ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
                         "mod_wsgi (pid=%d): "
                         "Couldn't unlink unix domain socket '%s'.",
                         getpid(), group->socket_path);
        }
    }

    return APR_SUCCESS;
}

static int wsgi_start_process(apr_pool_t *p, WSGIDaemonProcess *daemon)
{
    apr_status_t status;
    ap_listen_rec *lr;

    WSGIProcessGroup *entries = NULL;
    WSGIProcessGroup *entry = NULL;

    int i = 0;

    if ((status = apr_proc_fork(&daemon->process, p)) < 0) {
        ap_log_error(APLOG_MARK, APLOG_ALERT, errno, wsgi_server,
                     "mod_wsgi: Couldn't spawn process '%s'.",
                     daemon->group->name);
        return DECLINED;
    }
    else if (status == APR_INCHILD) {
        if (!geteuid()) {
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
                         "mod_wsgi (pid=%d): Starting process '%s' with "
                         "uid=%ld, gid=%u and threads=%d.", getpid(),
                         daemon->group->name, (long)daemon->group->uid,
                         (unsigned)daemon->group->gid,
                         daemon->group->threads);
        }
        else {
            ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server,
                         "mod_wsgi (pid=%d): Starting process '%s' with "
                         "threads=%d.", getpid(), daemon->group->name,
                         daemon->group->threads);
        }

#ifdef HAVE_BINDPROCESSOR
        /*
         * By default, AIX binds to a single processor. This
         * bit unbinds children which will then bind to another
         * CPU.
         */

        status = bindprocessor(BINDPROCESS, (int)getpid(),
                               PROCESSOR_CLASS_ANY);

        if (status != OK) {
            ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
                         "mod_wsgi (pid=%d): Failed to unbind processor.",
                         getpid());
        }
#endif

        /* Setup daemon process name displayed by 'ps'. */

        wsgi_setup_daemon_name(daemon, p);

        /* Adjust CPU priority if overridden. */

        if (daemon->group->cpu_priority != 0) {
            if (setpriority(PRIO_PROCESS, 0,
                            daemon->group->cpu_priority) == -1) {
                ap_log_error(APLOG_MARK, APLOG_ERR, errno, wsgi_server,
                             "mod_wsgi (pid=%d): Couldn't set CPU priority "
                             "in daemon process '%d'.", getpid(),
                             daemon->group->cpu_priority);
            }
        }

        /* Setup daemon process user/group/umask etc. */

        if (wsgi_setup_access(daemon) == -1) {
            /*
             * If we get any failure from setting up the appropriate
             * permissions or working directory for the daemon process
             * then we exit the process. Don't die immediately to avoid
             * a fork bomb.
             */

            ap_log_error(APLOG_MARK, APLOG_ALERT, 0, wsgi_server,
                         "mod_wsgi (pid=%d): Failure to configure the "
                         "daemon process correctly and process left in "
                         "unspecified state. Restarting daemon process "
                         "after delay.", getpid());

            sleep(20);

            wsgi_exit_daemon_process(-1);
        }

        /* Reinitialise accept mutex in daemon process.
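         *
         * An APR process mutex created in the Apache parent has to be
         * explicitly re-attached after fork() in every child process
         * which wants to use it, which is what the call below does.
         * The general pattern (a sketch only, with hypothetical names)
         * is:
         *
         *   // in the parent, before forking
         *   apr_proc_mutex_create(&mutex, path, APR_LOCK_DEFAULT, pool);
         *
         *   // in each child, after forking
         *   apr_proc_mutex_child_init(&mutex, path, pool);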
*/ if (daemon->group->mutex) { status = apr_proc_mutex_child_init(&daemon->group->mutex, daemon->group->mutex_path, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, "mod_wsgi (pid=%d): Couldn't initialise accept " "mutex in daemon process '%s'.", getpid(), daemon->group->mutex_path); /* Don't die immediately to avoid a fork bomb. */ sleep(20); wsgi_exit_daemon_process(-1); } } /* * Create a lookup table of listener socket address * details so can use it later in daemon when trying * to map request to correct virtual host server. */ wsgi_daemon_listeners = apr_hash_make(p); for (lr = ap_listeners; lr; lr = lr->next) { char *key; char *host; apr_port_t port; host = lr->bind_addr->hostname; port = lr->bind_addr->port; if (!host) host = ""; key = apr_psprintf(p, "%s|%d", host, port); apr_hash_set(wsgi_daemon_listeners, key, APR_HASH_KEY_STRING, lr->bind_addr); } /* * Close child copy of the listening sockets for the * Apache parent process so we don't interfere with * the parent process. */ ap_close_listeners(); /* * Cleanup the Apache scoreboard to ensure that any * shared memory segments or memory mapped files not * available to code in daemon processes. */ /* * XXX If this is closed, under Apache 2.4 then daemon * mode processes will crash. Not much choice but to * leave it open. Daemon mode really needs to be * rewritten not to use normal Apache request object and * output bucket chain to avoid potential for problems. */ #if 0 ap_cleanup_scoreboard(0); #endif /* * Wipe out random value used in magic token so that not * possible for user code running in daemon process to * discover this value for other daemon process groups. * In other words, wipe out all but our own. */ entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (entry != daemon->group) entry->random = 0; } /* * Close listener socket for daemon processes for other * daemon process groups. In other words, close all but * our own. */ entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (entry != daemon->group && entry->listener_fd != -1) { close(entry->listener_fd); entry->listener_fd = -1; } } /* * Register signal handler to receive shutdown signal * from Apache parent process. We need to first create * pipe by which signal handler can notify the main * thread that signal has arrived indicating that * process needs to shutdown. */ status = apr_file_pipe_create(&wsgi_signal_pipe_in, &wsgi_signal_pipe_out, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_EMERG, status, wsgi_server, "mod_wsgi (pid=%d): Couldn't initialise signal " "pipe in daemon process '%s'.", getpid(), daemon->group->name); /* Don't die immediately to avoid a fork bomb. */ sleep(20); wsgi_exit_daemon_process(-1); } wsgi_daemon_shutdown = 0; wsgi_daemon_pid = getpid(); apr_signal(SIGINT, wsgi_signal_handler); apr_signal(SIGTERM, wsgi_signal_handler); apr_signal(AP_SIG_GRACEFUL, wsgi_signal_handler); #ifdef SIGXCPU apr_signal(SIGXCPU, wsgi_signal_handler); #endif /* Set limits on amount of CPU time that can be used. 
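 *
 * With setrlimit(RLIMIT_CPU) the kernel normally delivers SIGXCPU when
 * the accumulated CPU time reaches the soft limit, and only kills the
 * process outright once the hard limit is exceeded. That is why the
 * hard limit below is padded out beyond the soft limit by the shutdown
 * timeout, leaving the SIGXCPU handler registered above a window in
 * which to attempt a graceful shutdown first. A minimal standalone
 * sketch of the same idea (illustrative values only):
 *
 *   struct rlimit limit;
 *
 *   limit.rlim_cur = 60;        // soft limit: SIGXCPU after 60s of CPU
 *   limit.rlim_max = 60 + 5;    // hard limit: process killed 5s later
 *
 *   if (setrlimit(RLIMIT_CPU, &limit) == -1)
 *       perror("setrlimit");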
*/ if (daemon->group->cpu_time_limit > 0) { struct rlimit limit; int result = -1; errno = ENOSYS; limit.rlim_cur = daemon->group->cpu_time_limit; limit.rlim_max = daemon->group->cpu_time_limit + 1; limit.rlim_max += daemon->group->shutdown_timeout; #if defined(RLIMIT_CPU) result = setrlimit(RLIMIT_CPU, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set CPU time " "limit of %d seconds for process '%s'.", getpid(), daemon->group->cpu_time_limit, daemon->group->name); } } /* * Set limits on amount of date segment memory that can * be used. Although this is done, some platforms * doesn't actually support it. */ if (daemon->group->memory_limit > 0) { struct rlimit limit; int result = -1; errno = ENOSYS; limit.rlim_cur = daemon->group->memory_limit; limit.rlim_max = daemon->group->memory_limit; #if defined(RLIMIT_DATA) result = setrlimit(RLIMIT_DATA, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set memory " "limit of %ld for process '%s'.", getpid(), (long)daemon->group->memory_limit, daemon->group->name); } } /* * Set limits on amount of virtual memory that can be used. * Although this is done, some platforms doesn't actually * support it. */ if (daemon->group->virtual_memory_limit > 0) { struct rlimit limit; int result = -1; errno = ENOSYS; limit.rlim_cur = daemon->group->virtual_memory_limit; limit.rlim_max = daemon->group->virtual_memory_limit; #if defined(RLIMIT_AS) result = setrlimit(RLIMIT_AS, &limit); #elif defined(RLIMIT_VMEM) result = setrlimit(RLIMIT_VMEM, &limit); #endif if (result == -1) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't set virtual memory " "limit of %ld for process '%s'.", getpid(), (long)daemon->group->virtual_memory_limit, daemon->group->name); } } /* * Flag whether multiple daemon processes or denoted * that requests could be spread across multiple daemon * process groups. */ wsgi_multiprocess = daemon->group->multiprocess; wsgi_multithread = daemon->group->threads != 1; /* * Create a pool for the child daemon process so * we can trigger various events off it at shutdown. */ apr_pool_create(&wsgi_daemon_pool, p); /* * Retain a reference to daemon process details. Do * this here as when doing lazy initialisation of * the interpreter we want to know if in a daemon * process so can pick any daemon process specific * home directory for Python installation. */ wsgi_daemon_group = daemon->group->name; wsgi_daemon_process = daemon; /* Set lang/locale if specified for daemon process. */ if (daemon->group->lang) { char *envvar; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Setting lang to %s for " "daemon process group %s.", getpid(), daemon->group->lang, daemon->group->name); envvar = apr_pstrcat(p, "LANG=", daemon->group->lang, NULL); putenv(envvar); } if (daemon->group->locale) { char *envvar; char *result; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Setting locale to %s for " "daemon process group %s.", getpid(), daemon->group->locale, daemon->group->name); envvar = apr_pstrcat(p, "LC_ALL=", daemon->group->locale, NULL); putenv(envvar); result = setlocale(LC_ALL, daemon->group->locale); if (!result) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Unsupported locale setting " "%s specified for daemon process group %s. 
" "Consider using 'C.UTF-8' as fallback setting.", getpid(), daemon->group->locale, daemon->group->name); } } /* Create lock for request monitoring. */ apr_thread_mutex_create(&wsgi_monitor_lock, APR_THREAD_MUTEX_UNNESTED, p); /* * Initialise Python if required to be done in the child * process. Note that it will not be initialised if * mod_python loaded and it has already been done. */ if (wsgi_python_after_fork) wsgi_python_init(p); #if PY_MAJOR_VERSION < 3 /* * If mod_python is also being loaded and thus it was * responsible for initialising Python it can leave in * place an active thread state. Under normal conditions * this would be eliminated in Apache child process by * the time that mod_wsgi got to do its own child * initialisation but in daemon process we skip the * mod_python child initialisation so the active thread * state still exists. Thus need to do a bit of a fiddle * to ensure there is no active thread state. Don't need * to worry about this with Python 3.X as mod_python * only supports Python 2.X. */ if (!wsgi_python_initialized) { PyGILState_STATE state; PyEval_AcquireLock(); state = PyGILState_Ensure(); PyGILState_Release(state); if (state == PyGILState_LOCKED) PyThreadState_Swap(NULL); PyEval_ReleaseLock(); } #endif /* * If the daemon is associated with a virtual host then * we can close all other error logs so long as they * aren't the same one as being used for the virtual * host. If the virtual host error log is different to * the main server error log, then also tie stderr to * that log file instead. This way any debugging sent * direct to stderr from C code also goes to the virtual * host error log. We close the error logs that aren't * required as that eliminates possibility that user * code executing in daemon process could maliciously * dump messages into error log for a different virtual * host, as well as stop them being reopened with mode * that would allow seeking back to start of file and * read any information in them. */ if (daemon->group->server->is_virtual) { server_rec *server = NULL; apr_file_t *errfile = NULL; /* * Iterate over all servers and close any error * logs different to that for virtual host. Note that * if errors are being redirected to syslog, then * the server error log reference will actually be * a null pointer, so need to ensure that check for * that and don't attempt to close it in that case. */ server = wsgi_server; while (server != NULL) { if (server->error_log && server->error_log != daemon->group->server->error_log) { apr_file_close(server->error_log); } server = server->next; } /* * Reassociate stderr output with error log from the * virtual host the daemon is associated with. Close * the virtual host error log and point it at stderr * log instead. Do the latter so don't get two * references to same open file. Just in case * anything still accesses error log of main server, * map main server error log to that of the virtual * host. Note that cant do this if errors are being * redirected to syslog, as indicated by virtual * host error log being a null pointer. In that case * just leave everything as it was. Also can't remap * the error log for main server if it was being * redirected to syslog but virtual host wasn't. 
*/ if (daemon->group->server->error_log && daemon->group->server->error_log != wsgi_server->error_log) { apr_file_t *oldfile = NULL; apr_file_open_stderr(&errfile, wsgi_server->process->pool); apr_file_dup2(errfile, daemon->group->server->error_log, wsgi_server->process->pool); oldfile = daemon->group->server->error_log; server = wsgi_server; while (server != NULL) { if (server->error_log == oldfile) server->error_log = errfile; server = server->next; } apr_file_close(oldfile); if (wsgi_server->error_log) wsgi_server->error_log = errfile; } } /* * Update reference to server object in case daemon * process is actually associated with a virtual host. * This way all logging actually goes into the virtual * hosts log file. */ if (daemon->group->server) { if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Process '%s' logging to " "'%s'.", getpid(), daemon->group->name, daemon->group->server->server_hostname); } wsgi_server = daemon->group->server; } else { if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Process '%s' forced to log " "to '%s'.", getpid(), daemon->group->name, wsgi_server->server_hostname); } } /* Time daemon process started waiting for requests. */ wsgi_restart_time = apr_time_now(); /* * Setup Python in the child daemon process. Note that * we ensure that we are now marked as the original * initialiser of the Python interpreter even though * mod_python might have done it, as we will be the one * to cleanup the child daemon process and not * mod_python. We also need to perform the special * Python setup which has to be done after a fork. */ wsgi_python_initialized = 1; wsgi_python_path = daemon->group->python_path; wsgi_python_eggs = daemon->group->python_eggs; wsgi_newrelic_config_file = daemon->group->newrelic_config_file; wsgi_newrelic_environment = daemon->group->newrelic_environment; wsgi_python_child_init(wsgi_daemon_pool); /* * Create socket wrapper for listener file descriptor * and mutex for controlling which thread gets to * perform the accept() when a connection is ready. */ apr_os_sock_put(&daemon->listener, &daemon->group->listener_fd, p); /* * Run the main routine for the daemon process if there * is a non zero number of threads. When number of threads * is zero we actually go on and shutdown immediately. */ if (daemon->group->threads != 0) wsgi_daemon_main(p, daemon); /* * Destroy the pool for the daemon process. This will * have the side affect of also destroying Python. */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Stopping process '%s'.", getpid(), daemon->group->name); apr_pool_destroy(wsgi_daemon_pool); /* Exit the daemon process when being shutdown. */ wsgi_exit_daemon_process(0); } #ifdef HAVE_FORK if (wsgi_python_initialized) { #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 7) #if 0 /* * XXX Appears to be wrong to call this at this point especially * since we haven't acquired the GIL. It wouldn't have been possible * for any user code to have registered a Python callback to run * in parent after fork either. Leave in code for now but disabled. 
*/ PyOS_AfterFork_Parent(); #endif #endif } #endif apr_pool_note_subprocess(p, &daemon->process, APR_KILL_AFTER_TIMEOUT); apr_proc_other_child_register(&daemon->process, wsgi_manage_process, daemon, NULL, p); return OK; } static int wsgi_start_daemons(apr_pool_t *p) { WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; WSGIDaemonProcess *process = NULL; int mpm_generation = 0; int i, j; /* Do we need to create any daemon processes. */ if (!wsgi_daemon_list) return OK; /* What server generation is this. */ #if defined(AP_MPMQ_GENERATION) ap_mpm_query(AP_MPMQ_GENERATION, &mpm_generation); #else mpm_generation = ap_my_generation; #endif /* * Cache references to root server and pool as will need * to access these when restarting daemon process when * they die. */ wsgi_parent_pool = p; /* * Startup in turn the required number of daemon processes * for each of the named process groups. */ wsgi_daemon_index = apr_hash_make(p); entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { int status; entry = &entries[i]; /* * Check for whether the daemon process user and * group are the default Apache values. If they are * then reset them to the current values configured for * Apache. This is to work around where the User/Group * directives had not been set before the WSGIDaemonProcess * directive was used in configuration file. In this case, * where no 'user' and 'group' options were provided, * the default values would have been used, but these * were later overridden thus why we need to update it. */ if (entry->uid == ap_uname2id(DEFAULT_USER)) { entry->uid = ap_unixd_config.user_id; entry->user = ap_unixd_config.user_name; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Reset default user for " "daemon process group '%s' to uid=%ld.", getpid(), entry->name, (long)entry->uid); } if (entry->gid == ap_gname2id(DEFAULT_GROUP)) { entry->gid = ap_unixd_config.group_id; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Reset default group for " "daemon process group '%s' to gid=%ld.", getpid(), entry->name, (long)entry->gid); } /* * Calculate path for socket to accept requests on and * create the socket. */ entry->socket_rotation = wsgi_server_config->socket_rotation; if (entry->socket_rotation) { entry->socket_path = apr_psprintf(p, "%s.%d.%d.%d.sock", wsgi_server_config->socket_prefix, getpid(), mpm_generation, entry->id); } else { entry->socket_path = apr_psprintf(p, "%s.%d.u%d.%d.sock", wsgi_server_config->socket_prefix, getpid(), entry->uid, entry->id); } apr_hash_set(wsgi_daemon_index, entry->name, APR_HASH_KEY_STRING, entry); entry->listener_fd = wsgi_setup_socket(entry); if (entry->listener_fd == -1) return DECLINED; /* * Register cleanup so that listener socket is cleaned * up properly on a restart and on shutdown. */ apr_pool_cleanup_register(p, entry, wsgi_cleanup_process, apr_pool_cleanup_null); /* * If there is more than one daemon process in the group * then need to create an accept mutex for the daemon * processes to use so they don't interfere with each * other. 
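 *
 * The mutex is a conventional cross process accept serialisation lock:
 * on the daemon side each process grabs the lock before calling
 * accept() on the shared listener socket and drops it straight after,
 * so only one process in the group is ever sitting in accept() at a
 * time. In outline only (not the code the daemon processes run):
 *
 *   apr_proc_mutex_lock(group_mutex);
 *   rv = apr_socket_accept(&connection, listener, pool);
 *   apr_proc_mutex_unlock(group_mutex);
 *
 * which avoids the thundering herd effect of waking every idle
 * process in the group for each new connection.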
*/ if (entry->processes > 1) { entry->mutex_path = apr_psprintf(p, "%s.%d.%d.%d.lock", wsgi_server_config->socket_prefix, getpid(), mpm_generation, entry->id); status = apr_proc_mutex_create(&entry->mutex, entry->mutex_path, wsgi_server_config->lock_mechanism, p); if (status != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): Couldn't create accept " "lock '%s' (%d).", getpid(), entry->mutex_path, wsgi_server_config->lock_mechanism); return DECLINED; } /* * Depending on the locking mechanism being used * need to change the permissions of the lock. Can't * use unixd_set_proc_mutex_perms() as it uses the * default Apache child process uid/gid where the * daemon process uid/gid can be different. */ if (!geteuid()) { #if APR_HAS_SYSVSEM_SERIALIZE if (!strcmp(apr_proc_mutex_name(entry->mutex), "sysvsem")) { apr_os_proc_mutex_t ospmutex; #if !APR_HAVE_UNION_SEMUN union semun { long val; struct semid_ds *buf; unsigned short *array; }; #endif union semun ick; struct semid_ds buf; apr_os_proc_mutex_get(&ospmutex, entry->mutex); buf.sem_perm.uid = entry->uid; buf.sem_perm.gid = entry->gid; buf.sem_perm.mode = 0600; ick.buf = &buf; if (semctl(ospmutex.crossproc, 0, IPC_SET, ick) < 0) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): " "Couldn't set permissions on accept " "mutex '%s' (sysvsem).", getpid(), entry->mutex_path); return DECLINED; } } #endif #if APR_HAS_FLOCK_SERIALIZE if (!strcmp(apr_proc_mutex_name(entry->mutex), "flock")) { if (chown(entry->mutex_path, entry->uid, -1) < 0) { ap_log_error(APLOG_MARK, APLOG_CRIT, errno, wsgi_server, "mod_wsgi (pid=%d): " "Couldn't set permissions on accept " "mutex '%s' (flock).", getpid(), entry->mutex_path); return DECLINED; } } #endif } } /* Create the actual required daemon processes. */ for (j = 1; j <= entry->processes; j++) { process = (WSGIDaemonProcess *)apr_pcalloc(p, sizeof( WSGIDaemonProcess)); process->group = entry; process->instance = j; status = wsgi_start_process(p, process); if (status != OK) return status; } } return OK; } static apr_pool_t *wsgi_pconf_pool = NULL; static int wsgi_deferred_start_daemons(apr_pool_t *p, ap_scoreboard_e sb_type) { return wsgi_start_daemons(wsgi_pconf_pool); } static apr_status_t wsgi_socket_connect_un(apr_socket_t *sock, struct sockaddr_un *sa) { apr_status_t rv; apr_os_sock_t rawsock; apr_interval_time_t t; rv = apr_os_sock_get(&rawsock, sock); if (rv != APR_SUCCESS) { return rv; } rv = apr_socket_timeout_get(sock, &t); if (rv != APR_SUCCESS) { return rv; } do { rv = connect(rawsock, (struct sockaddr*)sa, APR_OFFSETOF(struct sockaddr_un, sun_path) + strlen(sa->sun_path) + 1); } while (rv == -1 && errno == EINTR); if ((rv == -1) && (errno == EINPROGRESS || errno == EALREADY) && (t > 0)) { #if APR_MAJOR_VERSION < 2 rv = apr_wait_for_io_or_timeout(NULL, sock, 0); #else rv = apr_socket_wait(sock, APR_WAIT_WRITE); #endif if (rv != APR_SUCCESS) { return rv; } } if (rv == -1 && errno != EISCONN) { return errno; } return APR_SUCCESS; } static int wsgi_connect_daemon(request_rec *r, WSGIDaemonSocket *daemon) { WSGIRequestConfig *config = NULL; apr_status_t rv; struct sockaddr_un addr; int retries = 0; apr_interval_time_t timer = 0; apr_interval_time_t total_time = 0; apr_time_t start_time = 0; /* Grab request configuration. 
*/ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); memset(&addr, 0, sizeof(addr)); addr.sun_family = AF_UNIX; apr_cpystrn(addr.sun_path, daemon->socket_path, sizeof(addr.sun_path)); start_time = apr_time_now(); while (1) { retries++; config->daemon_connects++; rv = apr_socket_create(&daemon->socket, AF_UNIX, SOCK_STREAM, 0, r->pool); if (rv != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, rv, r, "mod_wsgi (pid=%d): Unable to create socket to " "connect to WSGI daemon process.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Apply timeout before issuing the socket connection in * case this hangs for some reason. Would have to be an extreme * event for a UNIX socket connect to hang, but have had some * unexplained situations which look exactly like that. */ if (daemon->socket_timeout) apr_socket_timeout_set(daemon->socket, daemon->socket_timeout); else apr_socket_timeout_set(daemon->socket, r->server->timeout); rv = wsgi_socket_connect_un(daemon->socket, &addr); if (rv != APR_SUCCESS) { /* * We need to check for both connection refused and * connection unavailable as Linux systems when * connecting to a UNIX listener socket in non * blocking mode, where the listener backlog is full * will return the error EAGAIN rather than returning * ECONNREFUSED as is supposedly dictated by POSIX. */ if (APR_STATUS_IS_ECONNREFUSED(rv) || APR_STATUS_IS_EAGAIN(rv)) { if ((apr_time_now()-start_time) < daemon->connect_timeout) { if (wsgi_server_config->verbose_debugging) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, rv, r, "mod_wsgi (pid=%d): Connection attempt " "#%d to WSGI daemon process '%s' on " "'%s' failed, sleeping before retrying " "again.", getpid(), retries, daemon->name, daemon->socket_path); } apr_socket_close(daemon->socket); /* * Progressively increase time we wait between * connection attempts. Start at 0.125 second, but * back off to 1 second interval after 2 seconds. */ if (total_time < apr_time_make(2, 0)) timer = apr_time_make(0, 125000); else timer = apr_time_make(1, 0); apr_sleep(timer); total_time += timer; } else { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Unable to connect to " "WSGI daemon process '%s' on '%s' after " "multiple attempts as listener backlog " "limit was exceeded or the socket does " "not exist.", getpid(), daemon->name, daemon->socket_path); apr_socket_close(daemon->socket); return HTTP_SERVICE_UNAVAILABLE; } } else { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Unable to connect to " "WSGI daemon process '%s' on '%s' as user " "with uid=%ld.", getpid(), daemon->name, daemon->socket_path, (long)geteuid()); apr_socket_close(daemon->socket); return HTTP_SERVICE_UNAVAILABLE; } } else break; } return OK; } static apr_status_t wsgi_socket_send(apr_socket_t *sock, const char *buf, size_t buf_size) { apr_status_t rv; apr_size_t len; while (buf_size > 0) { len = buf_size; rv = apr_socket_send(sock, buf, &len); if (rv != APR_SUCCESS) return rv; buf += len; buf_size -= len; } return APR_SUCCESS; } static apr_status_t wsgi_socket_sendv_limit(apr_socket_t *sock, struct iovec *vec, size_t nvec) { apr_status_t rv; apr_size_t written = 0; apr_size_t to_write = 0; size_t i, offset; /* Calculate how much has to be sent. */ for (i = 0; i < nvec; i++) { to_write += vec[i].iov_len; } /* Loop until all data has been sent. 
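 *
 * apr_socket_sendv(), like writev(), may send only part of the iovec
 * array, so a short write has to be handled by skipping over whole
 * entries that were sent and trimming the first partially sent entry.
 * The bookkeeping in the loop below amounts to (outline only):
 *
 *   while (n >= vec[i].iov_len) {      // whole entry was written
 *       n -= vec[i].iov_len;
 *       i++;
 *   }
 *   vec[i].iov_base = (char *)vec[i].iov_base + n;    // partial entry
 *   vec[i].iov_len -= n;
 *
 * with the next sendv call then issued from entry 'i' onwards.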
*/ offset = 0; while (to_write) { apr_size_t n = 0; rv = apr_socket_sendv(sock, vec+offset, nvec-offset, &n); if (rv != APR_SUCCESS) return rv; if (n > 0) { /* Bail out of all data has been sent. */ written += n; if (written >= to_write) break; /* * Not all data was sent, so ween need to try * again with the remainder of the data. We * first need to work out where to start from. */ for (i = offset; i < nvec; ) { if (n >= vec[i].iov_len) { offset++; n -= vec[i++].iov_len; } else { vec[i].iov_len -= n; vec[i].iov_base = (char *) vec[i].iov_base + n; break; } } } } return APR_SUCCESS; } static apr_status_t wsgi_socket_sendv(apr_socket_t *sock, struct iovec *vec, size_t nvec) { #if defined(_SC_IOV_MAX) static size_t iov_max = 0; if (iov_max == 0) iov_max = sysconf(_SC_IOV_MAX); #else static size_t iov_max = APR_MAX_IOVEC_SIZE; #endif if (nvec > iov_max) { int offset = 0; while (nvec != 0) { apr_status_t rv; rv = wsgi_socket_sendv_limit(sock, &vec[offset], (nvec < iov_max ? nvec : (int)iov_max)); if (rv != APR_SUCCESS) return rv; if (nvec > iov_max) { nvec -= iov_max; offset += iov_max; } else { nvec = 0; } } return APR_SUCCESS; } else return wsgi_socket_sendv_limit(sock, vec, nvec); } static apr_status_t wsgi_send_request(request_rec *r, WSGIRequestConfig *config, WSGIDaemonSocket *daemon) { int rv; const apr_array_header_t *env_arr; const apr_table_entry_t *elts; int i; struct iovec *vec; struct iovec *vec_start; struct iovec *vec_next; apr_size_t total = 0; apr_size_t count = 0; apr_table_setn(r->subprocess_env, "mod_wsgi.daemon_connects", apr_psprintf(r->pool, "%d", config->daemon_connects)); apr_table_setn(r->subprocess_env, "mod_wsgi.daemon_restarts", apr_psprintf(r->pool, "%d", config->daemon_restarts)); /* Send subprocess environment from request object. */ env_arr = apr_table_elts(r->subprocess_env); elts = (const apr_table_entry_t *)env_arr->elts; /* * Sending total amount of data, followed by count of separate * strings and then each null terminated string. The total is * inclusive of the bytes used for the count of the strings. 
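 *
 * On the wire the block handed to the daemon therefore looks like
 * (the two leading sizes are raw native apr_size_t values, which is
 * safe because both ends always run on the same machine):
 *
 *   +---------+---------+-----------+-------------+-----
 *   |  total  |  count  |  "KEY\0"  |  "VALUE\0"  | ...
 *   +---------+---------+-----------+-------------+-----
 *
 * where 'count' is the number of strings which follow and 'total' is
 * the length in bytes of everything after the 'total' field itself,
 * the 'count' field included. The daemon reads 'total' first and then
 * knows exactly how many more bytes to read before unpacking the
 * NUL terminated strings.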
 */

    vec = (struct iovec *)apr_palloc(r->pool, (2+(2*env_arr->nelts))*
                                     sizeof(struct iovec));

    vec_start = &vec[2];
    vec_next = vec_start;

    for (i = 0; i < env_arr->nelts; ++i) {
        if (!elts[i].key)
            continue;

        vec_next->iov_base = (void*)elts[i].key;
        vec_next->iov_len = strlen(elts[i].key) + 1;

        total += vec_next->iov_len;

        vec_next++;

        if (elts[i].val) {
            vec_next->iov_base = (void*)elts[i].val;
            vec_next->iov_len = strlen(elts[i].val) + 1;
        }
        else {
            vec_next->iov_base = (void*)"";
            vec_next->iov_len = 1;
        }

        total += vec_next->iov_len;

        vec_next++;
    }

    count = vec_next - vec_start;

    vec[1].iov_base = (void*)&count;
    vec[1].iov_len = sizeof(count);

    total += vec[1].iov_len;

    vec[0].iov_base = (void*)&total;
    vec[0].iov_len = sizeof(total);

    rv = wsgi_socket_sendv(daemon->socket, vec, (int)(vec_next-vec));

    if (rv != APR_SUCCESS)
        return rv;

    return APR_SUCCESS;
}

static void wsgi_discard_output(apr_bucket_brigade *bb)
{
    apr_bucket *e;
    const char *buf;
    apr_size_t len;
    apr_status_t rv;

    for (e = APR_BRIGADE_FIRST(bb);
         e != APR_BRIGADE_SENTINEL(bb);
         e = APR_BUCKET_NEXT(e)) {
        if (APR_BUCKET_IS_EOS(e)) {
            break;
        }

        rv = apr_bucket_read(e, &buf, &len, APR_BLOCK_READ);

        if (rv != APR_SUCCESS) {
            break;
        }
    }
}

static int wsgi_copy_header(void *v, const char *key, const char *val)
{
    apr_table_addn(v, key, val);

    return 1;
}

#define HTTP_UNSET (-HTTP_OK)

static int wsgi_scan_headers(request_rec *r, char *buffer, int buflen,
                             int (*getsfunc) (char *, int, void *),
                             void *getsfunc_data)
{
    char x[32768];
    char *w, *l;
    size_t p;
    int cgi_status = HTTP_UNSET;

    apr_table_t *merge;
    apr_table_t *cookie_table;
    apr_table_t *authen_table;

    WSGIRequestConfig *config = NULL;

    config = (WSGIRequestConfig *)ap_get_module_config(r->request_config,
                                                       &wsgi_module);

    /*
     * Default to internal fixed size buffer for reading headers if one
     * is not supplied explicitly with the call.
     */

    if (buffer)
        *buffer = '\0';

    w = buffer ? buffer : x;
    buflen = buffer ? buflen : sizeof(x);

    /* Temporary place to hold headers as we read them. */

    merge = apr_table_make(r->pool, 10);

    /*
     * The HTTP specification says that it is legal to merge duplicate
     * headers into one. Some browsers don't like certain headers being
     * merged however. These headers are Set-Cookie and WWW-Authenticate.
     * We will therefore keep these separate and merge them back in
     * independently at the end. Before we start though, we need to make
     * sure we save away any instances of these headers which may already
     * be listed in the request structure for some reason.
     */

    cookie_table = apr_table_make(r->pool, 2);
    apr_table_do(wsgi_copy_header, cookie_table, r->headers_out,
                 "Set-Cookie", NULL);

    authen_table = apr_table_make(r->pool, 2);
    apr_table_do(wsgi_copy_header, authen_table, r->err_headers_out,
                 "WWW-Authenticate", NULL);

    while (1) {
        int rv = (*getsfunc) (w, buflen - 1, getsfunc_data);

        if (rv == 0) {
            wsgi_log_script_error(r, apr_psprintf(r->pool, "Truncated or "
                                  "oversized response headers received from "
                                  "daemon process '%s'",
                                  config->process_group), r->filename);

            r->status_line = NULL;

            return HTTP_INTERNAL_SERVER_ERROR;
        }
        else if (rv == -1) {
            wsgi_log_script_error(r, apr_psprintf(r->pool, "Timeout when "
                                  "reading response headers from daemon "
                                  "process '%s'", config->process_group),
                                  r->filename);

            r->status_line = NULL;

            return HTTP_GATEWAY_TIME_OUT;
        }

        /*
         * Delete any trailing (CR?)LF. Indeed, the host's '\n':
         * '\012' for UNIX; '\015' for MacOS; '\025' for OS/390.
*/ p = strlen(w); if (p > 0 && w[p - 1] == '\n') { if (p > 1 && w[p - 2] == CR) { w[p - 2] = '\0'; } else { w[p - 1] = '\0'; } } /* * If we've finished reading the headers, check to make sure * any HTTP/1.1 conditions are met. If so, we're done; normal * processing will handle the script's output. If not, just * return the error. */ if (w[0] == '\0') { int cond_status = OK; /* * This fails because it gets confused when a CGI Status * header overrides ap_meets_conditions. * * We can fix that by dropping ap_meets_conditions when * Status has been set. Since this is the only place * cgi_status gets used, let's test it explicitly. * * The alternative would be to ignore CGI Status when * ap_meets_conditions returns anything interesting. That * would be safer wrt HTTP, but would break CGI. */ if ((cgi_status == HTTP_UNSET) && (r->method_number == M_GET)) { cond_status = ap_meets_conditions(r); } /* * Merge the headers received back into the request * structure. There should only be one per header with * values combined for these. */ apr_table_overlap(r->headers_out, merge, APR_OVERLAP_TABLES_MERGE); /* * Now add in the special headers which we can't merge * because it gives certain browsers problems. */ if (!apr_is_empty_table(cookie_table)) { apr_table_unset(r->headers_out, "Set-Cookie"); r->headers_out = apr_table_overlay(r->pool, r->headers_out, cookie_table); } if (!apr_is_empty_table(authen_table)) { apr_table_unset(r->err_headers_out, "WWW-Authenticate"); r->err_headers_out = apr_table_overlay(r->pool, r->err_headers_out, authen_table); } return cond_status; } /* If we see a bogus header don't ignore it. Shout and scream. */ if (!(l = strchr(w, ':'))) { char malformed[32]; strncpy(malformed, w, sizeof(malformed)-1); malformed[sizeof(malformed)-1] = '\0'; if (!buffer) { /* Soak up all the script output. */ while ((*getsfunc)(w, buflen - 1, getsfunc_data) > 0) { continue; } } wsgi_log_script_error(r, apr_psprintf(r->pool, "Malformed " "header '%s' found when reading script " "headers from daemon process '%s'", malformed, config->process_group), r->filename); r->status_line = NULL; return HTTP_INTERNAL_SERVER_ERROR; } /* Strip leading white space from header value. */ *l++ = '\0'; while (*l && apr_isspace(*l)) { ++l; } if (!strcasecmp(w, "Content-type")) { char *tmp; /* Nuke trailing whitespace. */ char *endp = l + strlen(l) - 1; while (endp > l && apr_isspace(*endp)) { *endp-- = '\0'; } tmp = apr_pstrdup(r->pool, l); ap_content_type_tolower(tmp); ap_set_content_type(r, tmp); } else if (!strcasecmp(w, "Status")) { /* * If the script returned a specific status, that's what * we'll use, otherwise we assume 200 OK. */ r->status = cgi_status = atoi(l); r->status_line = apr_pstrdup(r->pool, l); } else if (!strcasecmp(w, "Location")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Length")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Content-Range")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Transfer-Encoding")) { apr_table_set(r->headers_out, w, l); } else if (!strcasecmp(w, "Last-Modified")) { /* * If the script gave us a Last-Modified header, we can't just * pass it on blindly because of restrictions on future values. 
*/ ap_update_mtime(r, apr_date_parse_http(l)); ap_set_last_modified(r); } else if (!strcasecmp(w, "Set-Cookie")) { apr_table_add(cookie_table, w, l); } else if (!strcasecmp(w, "WWW-Authenticate")) { apr_table_add(authen_table, w, l); } else { apr_table_add(merge, w, l); } } return OK; } static int wsgi_getsfunc_brigade(char *buf, int len, void *arg) { apr_bucket_brigade *bb = (apr_bucket_brigade *)arg; const char *dst_end = buf + len - 1; char *dst = buf; apr_bucket *e = APR_BRIGADE_FIRST(bb); apr_status_t rv; int done = 0; while ((dst < dst_end) && !done && e != APR_BRIGADE_SENTINEL(bb) && !APR_BUCKET_IS_EOS(e)) { const char *bucket_data; apr_size_t bucket_data_len; const char *src; const char *src_end; apr_bucket * next; rv = apr_bucket_read(e, &bucket_data, &bucket_data_len, APR_BLOCK_READ); if (rv != APR_SUCCESS || (bucket_data_len == 0)) { *dst = '\0'; return APR_STATUS_IS_TIMEUP(rv) ? -1 : 0; } src = bucket_data; src_end = bucket_data + bucket_data_len; while ((src < src_end) && (dst < dst_end) && !done) { if (*src == '\n') { done = 1; } else if (*src != '\r') { *dst++ = *src; } src++; } if (src < src_end) { apr_bucket_split(e, src - bucket_data); } next = APR_BUCKET_NEXT(e); APR_BUCKET_REMOVE(e); apr_bucket_destroy(e); e = next; } *dst = '\0'; return done; } static int wsgi_scan_headers_brigade(request_rec *r, apr_bucket_brigade *bb, char *buffer, int buflen) { return wsgi_scan_headers(r, buffer, buflen, wsgi_getsfunc_brigade, bb); } static int wsgi_transfer_response(request_rec *r, apr_bucket_brigade *bb, apr_size_t buffer_size, apr_time_t timeout) { apr_bucket *e; apr_read_type_e mode = APR_NONBLOCK_READ; apr_bucket_brigade *tmpbb; const char *data = NULL; apr_size_t length = 0; apr_size_t bytes_transfered = 0; int bucket_count = 0; apr_status_t rv; #if AP_MODULE_MAGIC_AT_LEAST(20110605, 2) apr_socket_t *sock; apr_interval_time_t existing_timeout = 0; #endif if (buffer_size == 0) buffer_size = 65536; /* * Override the socket timeout for writing back data to the * client. If that wasn't defined this will be the same as * the timeout for the socket used in communicating with the * daemon, or left as the overall server timeout if that * isn't specified. Just to be safe we remember the existing * timeout and restore it at the end of a successful request * in case the same connection if kept alive and used for a * subsequent request with a different handler. */ #if AP_MODULE_MAGIC_AT_LEAST(20110605, 2) sock = ap_get_conn_socket(r->connection); rv = apr_socket_timeout_get(sock, &existing_timeout); if (rv != APR_SUCCESS) { existing_timeout = 0; } else { if (timeout) apr_socket_timeout_set(sock, timeout); } #endif /* * Transfer any response content. We want to avoid the * problem where the core output filter has no flow control * to deal with slow HTTP clients and can actually buffer up * excessive amounts of response content in memory. A fix * for this was only introduced in Apache 2.3.3, with * possible further tweaks in Apache 2.4.1. To avoid issue of * what version it was implemented in, just employ a * strategy of forcing a flush every time we pass through * more than a certain amount of data. */ tmpbb = apr_brigade_create(r->pool, r->connection->bucket_alloc); while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) { /* If we have reached end of stream, we need to pass it on */ if (APR_BUCKET_IS_EOS(e)) { /* * Probably do not need to force a flush as EOS should * do that, but do it just in case when we potentially * have pending data to be written out. 
*/ if (bytes_transfered != 0) { APR_BRIGADE_INSERT_TAIL(tmpbb, apr_bucket_flush_create( r->connection->bucket_alloc)); } APR_BRIGADE_INSERT_TAIL(tmpbb, apr_bucket_eos_create( r->connection->bucket_alloc)); rv = ap_pass_brigade(r->output_filters, tmpbb); apr_brigade_cleanup(tmpbb); if (rv != APR_SUCCESS) { apr_brigade_destroy(bb); /* * Don't flag error if client connection was aborted * so that access log still records the original HTTP * response code returned by the WSGI application. */ if (r->connection->aborted) return OK; return HTTP_INTERNAL_SERVER_ERROR; } break; } /* * Force the reading in of next block of data to be * transfered if necessary. If the bucket is a heap * bucket, then it will be whatever data is in it. If it * is a socket bucket, this will result in the bucket * being converted to a heap bucket with some amount of * data and the socket bucket added back in after it. Any * non data buckets should be skipped and discarded. The * result should always be that the first bucket is a * heap bucket. */ rv = apr_bucket_read(e, &data, &length, mode); /* * If we would have blocked if not in non blocking mode * we send a flush bucket to ensure that all buffered * data is sent out before we block waiting for more. */ if (rv == APR_EAGAIN && mode == APR_NONBLOCK_READ) { APR_BRIGADE_INSERT_TAIL(tmpbb, apr_bucket_flush_create( r->connection->bucket_alloc)); rv = ap_pass_brigade(r->output_filters, tmpbb); apr_brigade_cleanup(tmpbb); if (rv == APR_TIMEUP) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Failed to proxy response " "to client.", getpid()); } if (rv != APR_SUCCESS) { apr_brigade_destroy(bb); /* * Don't flag error if client connection was aborted * so that access log still records the original HTTP * response code returned by the WSGI application. */ if (r->connection->aborted) return OK; return HTTP_INTERNAL_SERVER_ERROR; } bytes_transfered = 0; bucket_count = 0; /* * Retry read from daemon using a blocking read. We do * not delete the bucket as we want to operate on the * same one as we would have blocked. */ mode = APR_BLOCK_READ; continue; } else if (rv != APR_SUCCESS) { apr_brigade_destroy(bb); ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Failed to proxy response " "from daemon.", getpid()); /* * Don't flag error if couldn't read from daemon * so that access log still records the original HTTP * response code returned by the WSGI application. */ return OK; } /* * We had some data to transfer. Next time round we need to * always be try a non-blocking read first. */ mode = APR_NONBLOCK_READ; /* * Now we don't actually work with the data which was * read direct and instead simply remove what should be a * heap bucket from the start of the bucket brigade and * then place in a new bucket brigade to be pushed out to * the client. By passing down the bucket, it avoids the * need to create a transient bucket holding a reference * to the data from the first bucket. */ APR_BUCKET_REMOVE(e); APR_BRIGADE_INSERT_TAIL(tmpbb, e); /* * If we have reached the buffer size threshold, we want * to flush the data so that we aren't buffering too much * in memory and blowing out memory size. We also have a * check on the number of buckets we have accumulated as * a large number of buckets with very small amounts of * data will also accumulate a lot of memory. Apache's * own flow control doesn't cope with such a situation. 
* Right now hard wire the max number of buckets at 16 * which equates to worst case number of separate data * blocks can be written by a writev() call on systems * such as Solaris. */ bytes_transfered += length; bucket_count += 1; if (bytes_transfered > buffer_size || bucket_count >= 16) { APR_BRIGADE_INSERT_TAIL(tmpbb, apr_bucket_flush_create( r->connection->bucket_alloc)); bytes_transfered = 0; bucket_count = 0; /* * Since we flushed the data out to the client, it is * okay to go back and do a blocking read the next time. */ mode = APR_BLOCK_READ; } /* Pass the heap bucket and any flush bucket on. */ rv = ap_pass_brigade(r->output_filters, tmpbb); apr_brigade_cleanup(tmpbb); if (rv == APR_TIMEUP) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Failed to proxy response " "to client.", getpid()); } if (rv != APR_SUCCESS) { apr_brigade_destroy(bb); /* * Don't flag error if client connection was aborted * so that access log still records the original HTTP * response code returned by the WSGI application. */ if (r->connection->aborted) return OK; return HTTP_INTERNAL_SERVER_ERROR; } } #if AP_MODULE_MAGIC_AT_LEAST(20110605, 2) if (existing_timeout) apr_socket_timeout_set(sock, existing_timeout); #endif apr_brigade_destroy(bb); return OK; } #define ASCII_CRLF "\015\012" #define ASCII_ZERO "\060" static int wsgi_execute_remote(request_rec *r) { WSGIRequestConfig *config = NULL; WSGIDaemonSocket *daemon = NULL; WSGIProcessGroup *group = NULL; char *key = NULL; const char *hash = NULL; int status; apr_status_t rv; int seen_eos; int child_stopped_reading; apr_bucket_brigade *bbout; apr_bucket_brigade *bbin; apr_bucket *b; const char *location = NULL; char *header_buffer = NULL; int header_buflen = 0; /* Grab request configuration. */ config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); /* * Only allow the process group to match against a restricted * set of processes if such a restricted set has been defined. */ if (config->restrict_process) { if (!apr_table_get(config->restrict_process, config->process_group)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon " "process called '%s' cannot be " "accessed by this WSGI application " "as not a member of allowed groups", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } } /* * Do not process request as remote if actually targeted at * the main Apache processes. */ if (!*config->process_group) return DECLINED; /* Grab details of matching process group. */ if (!wsgi_daemon_index) { wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon " "process called '%s' has been configured", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } group = (WSGIProcessGroup *)apr_hash_get(wsgi_daemon_index, config->process_group, APR_HASH_KEY_STRING); if (!group) { wsgi_log_script_error(r, apr_psprintf(r->pool, "No WSGI daemon " "process called '%s' has been configured", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } /* * Only allow the process group to match against a daemon * process defined within a virtual host with the same * server name or a daemon process defined at global server * scope. 
*/ if (group->server != r->server && group->server != wsgi_server) { if (strcmp(group->server->server_hostname, r->server->server_hostname) != 0) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Daemon " "process called '%s' cannot be " "accessed by this WSGI application", config->process_group), r->filename); return HTTP_INTERNAL_SERVER_ERROR; } } /* * Check restrictions related to the group of the WSGI * script file and who has write access to the directory it * is contained in. If not satisfied forbid access. */ if (group->script_group) { apr_uid_t gid; struct group *grent = NULL; const char *grname = NULL; apr_finfo_t finfo; const char *path = NULL; if (!(r->finfo.valid & APR_FINFO_GROUP)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group " "information not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } gid = r->finfo.group; if ((grent = getgrgid(gid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine group of WSGI script file, " "gid=%ld", (long)gid), r->filename); return HTTP_FORBIDDEN; } grname = grent->gr_name; if (strcmp(group->script_group, grname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group of WSGI " "script file does not match required group " "for daemon process, group=%s", grname), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_WPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "World " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to world"), r->filename); return HTTP_FORBIDDEN; } path = ap_make_dirstr_parent(r->pool, r->filename); if (apr_stat(&finfo, path, APR_FINFO_NORM, r->pool) != APR_SUCCESS) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Unable to stat " "parent directory of WSGI script"), path); return HTTP_FORBIDDEN; } gid = finfo.group; if ((grent = getgrgid(gid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine group of parent directory of " "WSGI script file, gid=%ld", (long)gid), r->filename); return HTTP_FORBIDDEN; } grname = grent->gr_name; if (strcmp(group->script_group, grname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group of parent " "directory of WSGI script file does not " "match required group for daemon process, " "group=%s", grname), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to world"), r->filename); return HTTP_FORBIDDEN; } } /* * Check restrictions related to who can be the owner of * the WSGI script file and who has write access to the * directory it is contained in. If not satisfied forbid * access. 
*/ if (group->script_user) { apr_uid_t uid; struct passwd *pwent = NULL; const char *pwname = NULL; apr_finfo_t finfo; const char *path = NULL; if (!(r->finfo.valid & APR_FINFO_USER)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "User " "information not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } uid = r->finfo.user; if ((pwent = getpwuid(uid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine owner of WSGI script file, " "uid=%ld", (long)uid), r->filename); return HTTP_FORBIDDEN; } pwname = pwent->pw_name; if (strcmp(group->script_user, pwname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Owner of WSGI " "script file does not match required user " "for daemon process, user=%s", pwname), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_GPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Group " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_GWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to group"), r->filename); return HTTP_FORBIDDEN; } if (!(r->finfo.valid & APR_FINFO_WPROT)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "World " "permissions not available for WSGI " "script file"), r->filename); return HTTP_FORBIDDEN; } if (r->finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "WSGI script " "file is writable to world"), r->filename); return HTTP_FORBIDDEN; } path = ap_make_dirstr_parent(r->pool, r->filename); if (apr_stat(&finfo, path, APR_FINFO_NORM, r->pool) != APR_SUCCESS) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Unable to stat " "parent directory of WSGI script"), path); return HTTP_FORBIDDEN; } uid = finfo.user; if ((pwent = getpwuid(uid)) == NULL) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Couldn't " "determine owner of parent directory of " "WSGI script file, uid=%ld", (long)uid), r->filename); return HTTP_FORBIDDEN; } pwname = pwent->pw_name; if (strcmp(group->script_user, pwname)) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Owner of parent " "directory of WSGI script file does not " "match required user for daemon process, " "user=%s", pwname), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_WWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to world"), r->filename); return HTTP_FORBIDDEN; } if (finfo.protection & APR_FPROT_GWRITE) { wsgi_log_script_error(r, apr_psprintf(r->pool, "Parent directory " "of WSGI script file is writable to group"), r->filename); return HTTP_FORBIDDEN; } } /* * Add magic marker into request environment so that daemon * process can verify that request is from a sender that can * be trusted. Wipe out original key to make it a bit harder * for rogue code in Apache child processes to trawl through * memory looking for unhashed string. */ key = apr_psprintf(r->pool, "%ld|%s|%s|%s", group->random, group->socket_path, r->filename, config->handler_script); hash = ap_md5(r->pool, (const unsigned char *)key); memset(key, '\0', strlen(key)); apr_table_setn(r->subprocess_env, "mod_wsgi.magic", hash); /* Create connection to the daemon process. 
*/ apr_table_setn(r->subprocess_env, "mod_wsgi.queue_start", apr_psprintf(r->pool, "%" APR_TIME_T_FMT, apr_time_now())); daemon = (WSGIDaemonSocket *)apr_pcalloc(r->pool, sizeof(WSGIDaemonSocket)); daemon->name = config->process_group; daemon->socket_path = group->socket_path; daemon->connect_timeout = group->connect_timeout; daemon->socket_timeout = group->socket_timeout; if ((status = wsgi_connect_daemon(r, daemon)) != OK) return status; /* Send request details and subprocess environment. */ if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Request server was " "'%s|%d'.", getpid(), r->server->server_hostname, r->server->port); } if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Unable to send request details " "to WSGI daemon process '%s' on '%s'.", getpid(), daemon->name, daemon->socket_path); return HTTP_INTERNAL_SERVER_ERROR; } /* Setup bucket brigade for reading response from daemon. */ bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_socket_create(daemon->socket, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); b = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); /* Create alternate buffer for reading in response header values. */ if (group->header_buffer_size != 0) { header_buflen = group->header_buffer_size; header_buffer = apr_pcalloc(r->pool, header_buflen); } /* * If process reload mechanism enabled, or a queue timeout is * specified, then we need to look for marker indicating it * is okay to transfer content, or whether process is being * restarted and that we should therefore create a * connection to daemon process again. */ if (*config->process_group && (config->script_reloading || group->queue_timeout != 0)) { int retries = 0; int maximum = (2*group->processes)+1; /* * While special header indicates a restart is being * done, then keep trying to reconnect. Cap the number * of retries to at most about 2 times the number of * daemon processes in the process group. If still being * told things are being restarted, then we will error * indicating service is unavailable. */ while (retries < maximum) { /* Scan the CGI script like headers from daemon. */ status = wsgi_scan_headers_brigade(r, bbin, header_buffer, header_buflen); if (status != OK) return status; /* * Status must be 200 for our special headers. Ideally * we would use 0 as did in the past but Apache 2.4 * complains if use 0 as not a valid status value. */ if (r->status != 200) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Unexpected status from " "WSGI daemon process '%d'.", getpid(), r->status); r->status_line = NULL; return HTTP_INTERNAL_SERVER_ERROR; } if (!strcmp(r->status_line, "200 Continue")) { r->status_line = NULL; break; } if (!strcmp(r->status_line, "200 Timeout")) { r->status_line = NULL; return HTTP_GATEWAY_TIME_OUT; } if (strcmp(r->status_line, "200 Rejected")) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Unexpected status from " "WSGI daemon process '%d'.", getpid(), r->status); r->status_line = NULL; return HTTP_INTERNAL_SERVER_ERROR; } r->status_line = NULL; /* Need to close previous socket connection first. */ apr_socket_close(daemon->socket); /* Has maximum number of attempts been reached. 
*/ if (retries == maximum) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Maximum number of WSGI " "daemon process restart connects reached '%d'.", getpid(), maximum); return HTTP_SERVICE_UNAVAILABLE; } retries++; config->daemon_restarts++; ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, "mod_wsgi (pid=%d): Connect after WSGI daemon " "process restart, attempt #%d.", getpid(), retries); /* Connect and setup connection just like before. */ if ((status = wsgi_connect_daemon(r, daemon)) != OK) return status; if ((rv = wsgi_send_request(r, config, daemon)) != APR_SUCCESS) { ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r, "mod_wsgi (pid=%d): Unable to send request " "details to WSGI daemon process '%s' on '%s'.", getpid(), daemon->name, daemon->socket_path); return HTTP_INTERNAL_SERVER_ERROR; } apr_brigade_destroy(bbin); bbin = apr_brigade_create(r->pool, r->connection->bucket_alloc); b = apr_bucket_socket_create(daemon->socket, r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); b = apr_bucket_eos_create(r->connection->bucket_alloc); APR_BRIGADE_INSERT_TAIL(bbin, b); } } /* * Need to reset request status value to HTTP_OK else it * screws up HTTP input filter when processing a POST * request with 100-continue requirement. */ r->status = HTTP_OK; /* * Transfer any request content which was provided. Note that we * actually frame each data block sent with same format as is used * for chunked transfer encoding. This will be decoded in the * daemon process. This is done so that the EOS can be properly * identified by the daemon process in the absence of a value for * CONTENT_LENGTH that can be relied on. The CONTENT_LENGTH is * dodgy when have mutating input filters and none will be present * at all if chunked request content was used. */ seen_eos = 0; child_stopped_reading = 0; bbout = apr_brigade_create(r->pool, r->connection->bucket_alloc); do { apr_bucket *bucket; rv = ap_get_brigade(r->input_filters, bbout, AP_MODE_READBYTES, APR_BLOCK_READ, HUGE_STRING_LEN); if (rv != APR_SUCCESS) { char status_buffer[512]; const char *error_message; error_message = apr_psprintf(r->pool, "Request data read " "error when proxying data to daemon process: %s", apr_strerror(rv, status_buffer, sizeof( status_buffer)-1)); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): %s.", getpid(), error_message); if (APR_STATUS_IS_TIMEUP(rv)) return HTTP_REQUEST_TIME_OUT; return HTTP_INTERNAL_SERVER_ERROR; } for (bucket = APR_BRIGADE_FIRST(bbout); bucket != APR_BRIGADE_SENTINEL(bbout); bucket = APR_BUCKET_NEXT(bucket)) { const char *data; apr_size_t len; char chunk_hdr[20]; apr_size_t hdr_len; struct iovec vec[3]; if (APR_BUCKET_IS_EOS(bucket)) { /* Send closing frame for chunked content. */ rv = wsgi_socket_send(daemon->socket, ASCII_ZERO ASCII_CRLF ASCII_CRLF, 5); if (rv != APR_SUCCESS) { char status_buffer[512]; const char *error_message; error_message = apr_psprintf(r->pool, "Request data write " "error when proxying data to daemon process: %s", apr_strerror(rv, status_buffer, sizeof( status_buffer)-1)); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): %s.", getpid(), error_message); } seen_eos = 1; break; } /* We can't do much with this. */ if (APR_BUCKET_IS_FLUSH(bucket)) { continue; } /* If the child stopped, we still must read to EOS. */ if (child_stopped_reading) { continue; } /* Read block. 
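     *
     * Each block read here is then written to the daemon framed in the
     * same way as HTTP chunked transfer encoding, with the length sent
     * as hexadecimal. For an illustrative 5 byte block "hello" the bytes
     * on the socket are:
     *
     *   5\r\nhello\r\n
     *
     * and the EOS bucket handled above produces the terminating frame:
     *
     *   0\r\n\r\n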
*/ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ); if (rv != APR_SUCCESS) { char status_buffer[512]; const char *error_message; error_message = apr_psprintf(r->pool, "Request data read " "error when proxying data to daemon process: %s", apr_strerror(rv, status_buffer, sizeof( status_buffer)-1)); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): %s.", getpid(), error_message); break; } /* * Keep writing data to the child until done or too * much time elapses with no progress or an error * occurs. Frame the data being sent with format used * for chunked transfer encoding. */ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr), "%" APR_UINT64_T_HEX_FMT ASCII_CRLF, (apr_uint64_t)len); vec[0].iov_base = (void *)chunk_hdr; vec[0].iov_len = hdr_len; vec[1].iov_base = (void *)data; vec[1].iov_len = len; vec[2].iov_base = (void *)ASCII_CRLF; vec[2].iov_len = 2; rv = wsgi_socket_sendv(daemon->socket, vec, 3); if (rv != APR_SUCCESS) { char status_buffer[512]; const char *error_message; error_message = apr_psprintf(r->pool, "Request data write " "error when proxying data to daemon process: %s", apr_strerror(rv, status_buffer, sizeof( status_buffer)-1)); ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): %s.", getpid(), error_message); /* Daemon stopped reading, discard remainder. */ child_stopped_reading = 1; } } apr_brigade_cleanup(bbout); } while (!seen_eos); /* * Close socket for writing so that daemon detects end of * request content. */ apr_socket_shutdown(daemon->socket, APR_SHUTDOWN_WRITE); /* Scan the CGI script like headers from daemon. */ status = wsgi_scan_headers_brigade(r, bbin, header_buffer, header_buflen); if (status != OK) return status; /* * Look for the special case of status being 200 but the * status line indicating an error and translate it into a * 500 error so that error document processing will occur * for those cases where WSGI application wouldn't have * supplied their own error document. We used to use 0 * here for status but Apache 2.4 prohibits it now. */ if (r->status == 200 && !strcmp(r->status_line, "200 Error")) { r->status_line = NULL; return HTTP_INTERNAL_SERVER_ERROR; } /* * Look for 'Location' header and if an internal * redirect, execute the redirect. This behaviour is * consistent with how mod_cgi and mod_cgid work and * what is permitted by the CGI specification. */ location = apr_table_get(r->headers_out, "Location"); if (location && location[0] == '/' && r->status == 200) { /* * Discard all response content returned from * the daemon process. */ wsgi_discard_output(bbin); apr_brigade_destroy(bbin); /* * The internal redirect needs to be a GET no * matter what the original method was. */ r->method = apr_pstrdup(r->pool, "GET"); r->method_number = M_GET; /* * We already read the message body (if any), so * don't allow the redirected request to think * it has one. Not sure if we need to worry * about removing 'Transfer-Encoding' header. */ apr_table_unset(r->headers_in, "Content-Length"); ap_internal_redirect_handler(location, r); return OK; } /* * Allow the web server to override any error * page produced by the WSGI application. */ if (config->error_override && ap_is_HTTP_ERROR(r->status)) { status = r->status; r->status = HTTP_OK; r->status_line = NULL; /* * Discard all response content returned from * the daemon process if any expected. 
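     *
     * For illustration, with error overriding enabled via the
     * WSGIErrorOverride directive and a configuration such as
     * (hypothetical document path):
     *
     *   ErrorDocument 500 /error/500.html
     *
     * a 500 response generated by the WSGI application is discarded
     * here and Apache's own error document is served in its place.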
*/ if (!r->header_only && /* not HEAD request */ (status != HTTP_NO_CONTENT) && /* not 204 */ (status != HTTP_NOT_MODIFIED)) { /* not 304 */ wsgi_discard_output(bbin); apr_brigade_destroy(bbin); } return status; } /* Transfer any response content. */ return wsgi_transfer_response(r, bbin, group->response_buffer_size, group->response_socket_timeout); } static apr_status_t wsgi_socket_read(apr_socket_t *sock, void *vbuf, apr_size_t size) { char *buf = vbuf; apr_status_t rv; apr_size_t count = 0; apr_size_t len = 0; do { len = size - count; if ((rv = apr_socket_recv(sock, buf + count, &len)) != APR_SUCCESS) return rv; count += len; } while (count < size); return APR_SUCCESS; } static apr_status_t wsgi_read_strings(apr_socket_t *sock, char ***s, apr_pool_t *p) { apr_status_t rv; apr_size_t total; apr_size_t n; apr_size_t i; apr_size_t l; char *buffer; char *offset; if ((rv = wsgi_socket_read(sock, &total, sizeof(total))) != APR_SUCCESS) return rv; buffer = apr_palloc(p, total); offset = buffer; if ((rv = wsgi_socket_read(sock, buffer, total)) != APR_SUCCESS) return rv; memcpy(&n, offset, sizeof(n)); offset += sizeof(n); *s = apr_pcalloc(p, (n+1)*sizeof(**s)); for (i = 0; i < n; i++) { l = strlen(offset) + 1; (*s)[i] = offset; offset += l; } return APR_SUCCESS; } static apr_status_t wsgi_read_request(apr_socket_t *sock, request_rec *r) { int rv; char **vars; /* Read subprocess environment from request object. */ rv = wsgi_read_strings(sock, &vars, r->pool); if (rv != APR_SUCCESS) return rv; while (*vars) { char *key = *vars++; apr_table_setn(r->subprocess_env, key, *vars++); } return APR_SUCCESS; } static ap_filter_rec_t *wsgi_header_filter_handle; static apr_status_t wsgi_header_filter(ap_filter_t *f, apr_bucket_brigade *b) { request_rec *r = f->r; struct iovec vec1[4]; apr_bucket_brigade *b2; char crlf[] = CRLF; apr_size_t buflen; const apr_array_header_t *elts; const apr_table_entry_t *t_elt; const apr_table_entry_t *t_end; struct iovec *vec2; struct iovec *vec2_next; /* Output status line. */ if (!r->status_line) r->status_line = ap_get_status_line(r->status); vec1[0].iov_base = (void *)"Status:"; vec1[0].iov_len = strlen("Status:"); vec1[1].iov_base = (void *)" "; vec1[1].iov_len = sizeof(" ") - 1; vec1[2].iov_base = (void *)(r->status_line); vec1[2].iov_len = strlen(r->status_line); vec1[3].iov_base = (void *)CRLF; vec1[3].iov_len = sizeof(CRLF) - 1; b2 = apr_brigade_create(r->pool, r->connection->bucket_alloc); apr_brigade_writev(b2, NULL, NULL, vec1, 4); /* Merge response header tables together. */ if (!apr_is_empty_table(r->err_headers_out)) { r->headers_out = apr_table_overlay(r->pool, r->err_headers_out, r->headers_out); } /* Override the content type for response. */ if (r->content_type) apr_table_setn(r->headers_out, "Content-Type", r->content_type); /* Formt the response headers for output. 
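     *
     * What gets passed down the filter chain is a CGI style header
     * block, for example (illustrative values only):
     *
     *   Status: 200 OK\r\n
     *   Content-Type: text/html\r\n
     *   Content-Length: 5\r\n
     *   \r\n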
*/ elts = apr_table_elts(r->headers_out); if (elts->nelts != 0) { t_elt = (const apr_table_entry_t *)(elts->elts); t_end = t_elt + elts->nelts; vec2 = (struct iovec *)apr_palloc(r->pool, 4 * elts->nelts * sizeof(struct iovec)); vec2_next = vec2; do { vec2_next->iov_base = (void*)(t_elt->key); vec2_next->iov_len = strlen(t_elt->key); vec2_next++; vec2_next->iov_base = ": "; vec2_next->iov_len = sizeof(": ") - 1; vec2_next++; vec2_next->iov_base = (void*)(t_elt->val); vec2_next->iov_len = strlen(t_elt->val); vec2_next++; vec2_next->iov_base = CRLF; vec2_next->iov_len = sizeof(CRLF) - 1; vec2_next++; t_elt++; } while (t_elt < t_end); apr_brigade_writev(b2, NULL, NULL, vec2, vec2_next - vec2); } /* Format terminating blank line for response headers. */ buflen = strlen(crlf); apr_brigade_write(b2, NULL, NULL, crlf, buflen); /* Output the response headers. */ ap_pass_brigade(f->next, b2); /* Remove ourselves from filter chain so we aren't called again. */ ap_remove_output_filter(f); /* Output the partial response content. */ return ap_pass_brigade(f->next, b); } typedef struct cve_2013_5704_fields cve_2013_5704_fields; typedef struct cve_2013_5704_apache22 cve_2013_5704_apache22; typedef struct cve_2013_5704_apache24 cve_2013_5704_apache24; struct cve_2013_5704_fields { apr_table_t *trailers_in; apr_table_t *trailers_out; }; struct cve_2013_5704_apache22 { struct ap_filter_t *proto_input_filters; int eos_sent; cve_2013_5704_fields fields; }; struct cve_2013_5704_apache24 { apr_sockaddr_t *useragent_addr; char *useragent_ip; cve_2013_5704_fields fields; }; static int wsgi_hook_daemon_handler(conn_rec *c) { apr_socket_t *csd; request_rec *r; apr_pool_t *p; apr_status_t rv; char *key; apr_sockaddr_t *addr; const char *filename; const char *script; const char *magic; const char *hash; WSGIRequestConfig *config; apr_bucket *e; apr_bucket_brigade *bb; core_request_config *req_cfg; core_dir_config *d; ap_filter_t *current = NULL; ap_filter_t *next = NULL; const char *item; int queue_timeout_occurred = 0; apr_time_t daemon_start = 0; #if ! (AP_MODULE_MAGIC_AT_LEAST(20120211, 37) || \ (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER <= 2 && \ AP_MODULE_MAGIC_AT_LEAST(20051115, 36))) apr_size_t size = 0; #endif /* Don't do anything if not in daemon process. */ if (!wsgi_daemon_pool) return DECLINED; /* * Mark this as start of daemon process even though connection * setup has already been done. Otherwise need to carry through * a time value somehow. */ daemon_start = apr_time_now(); /* * Remove all input/output filters except the core filters. * This will ensure that any SSL filters we don't want are * removed. This is a bit of a hack. Only other option is to * duplicate the code for core input/output filters so can * avoid full Apache connection processing, which is what is * installed the SSL filters and possibly other filters for * logging etc. */ current = c->input_filters; next = current->next; while (current) { if (current->frec == ap_core_input_filter_handle) { current = next; if (!current) break; next = current->next; continue; } ap_remove_input_filter(current); current = next; if (current) next = current->next; } current = c->output_filters; next = current->next; while (current) { if (current->frec == ap_core_output_filter_handle) { current = next; if (!current) break; next = current->next; continue; } ap_remove_output_filter(current); current = next; if (current) next = current->next; } /* * Create and populate our own request object. 
We allocate more * memory than we require here for the request_rec in order to * implement an opimistic hack for the case where mod_wsgi is built * against an Apache version prior to CVE-2013-6704 being applied to * it. If that Apache is upgraded but mod_wsgi not recompiled then * it will crash in daemon mode. We therefore use the extra space to * set the structure members which are added by CVE-2013-6704 to try * and avoid that situation. Note that this is distinct from the * hack down below to deal with where mod_wsgi was compiled against * an Apache version which had CVE-2013-6704 backported. */ apr_pool_create(&p, c->pool); r = apr_pcalloc(p, sizeof(request_rec)+sizeof(cve_2013_5704_fields)); r->pool = p; r->connection = c; r->server = c->base_server; r->user = NULL; r->ap_auth_type = NULL; r->allowed_methods = ap_make_method_list(p, 2); r->headers_in = apr_table_make(r->pool, 25); r->subprocess_env = apr_table_make(r->pool, 25); r->headers_out = apr_table_make(r->pool, 12); r->err_headers_out = apr_table_make(r->pool, 5); r->notes = apr_table_make(r->pool, 5); r->request_config = ap_create_request_config(r->pool); r->proto_output_filters = c->output_filters; r->output_filters = r->proto_output_filters; r->proto_input_filters = c->input_filters; r->input_filters = r->proto_input_filters; #if AP_MODULE_MAGIC_AT_LEAST(20120211, 37) || \ (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER <= 2 && \ AP_MODULE_MAGIC_AT_LEAST(20051115, 36)) /* * New request_rec fields were added to Apache because of changes * related to CVE-2013-5704. The change means that mod_wsgi version * 4.4.0-4.4.5 will crash if run on the Apache versions with the * addition fields if mod_wsgi daemon mode is used. If we are using * Apache 2.2.29 or 2.4.11, we set the fields direct against the * new structure members. */ r->trailers_in = apr_table_make(r->pool, 5); r->trailers_out = apr_table_make(r->pool, 5); #else /* * We use a huge hack here to try and identify when CVE-2013-5704 * has been back ported to older Apache version. This is necessary * as when backported the Apache module magic number will not be * updated and it isn't possible to determine from that at compile * time if the new structure members exist and so that they should * be set. We therefore try and work out whether the extra structure * members exist through looking at the size of request_rec and * whether memory has been allocated above what is known to be the * last member in the structure before the new members were added. */ #if AP_SERVER_MINORVERSION_NUMBER <= 2 size = offsetof(request_rec, eos_sent); size += sizeof(r->eos_sent); #else size = offsetof(request_rec, useragent_ip); size += sizeof(r->useragent_ip); #endif /* * Check whether request_rec is at least as large as minimal size * plus the size of the extra fields. If it is, then we need to * set the additional fields. 
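     *
     * Spelling the probe out for the Apache 2.4 case: the last member
     * known to precede the backported fields is useragent_ip, so
     *
     *   size = offsetof(request_rec, useragent_ip) + sizeof(r->useragent_ip);
     *
     * and if the headers mod_wsgi was compiled against already carry the
     * CVE-2013-5704 additions then
     *
     *   sizeof(request_rec) >= size + sizeof(cve_2013_5704_fields)
     *
     * holds, since trailers_in and trailers_out were appended after that
     * member.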
*/ if (sizeof(request_rec) >= size + sizeof(cve_2013_5704_fields)) { #if AP_SERVER_MINORVERSION_NUMBER <= 2 cve_2013_5704_apache22 *rext; rext = (cve_2013_5704_apache22 *)&r->proto_input_filters; #else cve_2013_5704_apache24 *rext; rext = (cve_2013_5704_apache24 *)&r->useragent_addr; #endif rext->fields.trailers_in = apr_table_make(r->pool, 5); rext->fields.trailers_out = apr_table_make(r->pool, 5); } else { /* * Finally, to allow forward portability of a compiled mod_wsgi * binary from an Apache version without the CVE-2013-5704 * change to one where it is, without needing to recompile * mod_wsgi, we set fields in the extra memory we added before * the actual request_rec. */ cve_2013_5704_fields *rext; rext = (cve_2013_5704_fields *)(r+1); rext->trailers_in = apr_table_make(r->pool, 5); rext->trailers_out = apr_table_make(r->pool, 5); } #endif r->per_dir_config = r->server->lookup_defaults; /* * Try and ensure that request body limit in daemon mode process * is unlimited as Apache 2.4.54 changed rules for limit and if * unset is now overridden by HTTP filters to be 1GiB rather than * unlimited. This is required since we populate configuration * from the base server config only so setting unlimited in a more * specific context such as a virtual host wouldn't be visible. * Note that setting this to unlimited in the daemon mode process * is okay as the request limit body is checked in the Apache * child process before request is proxied specifically to avoid * unecessarily passing the content across to the daemon process. * Note also that the reason that the change is applied for all * Apache 2.4.X versions is because a module compiled against an * Apache version before the change, can still be used with newer * Apache version without being re-compiled which would result in * a crash also. We can't enable for older Apache 2.X versions as * ap_get_core_module_config() doesn't exist. */ #if (AP_SERVER_MAJORVERSION_NUMBER == 2 && \ AP_SERVER_MINORVERSION_NUMBER >= 4) d = (core_dir_config *)ap_get_core_module_config(r->per_dir_config); d->limit_req_body = 0; #endif r->sent_bodyct = 0; r->read_length = 0; r->read_body = REQUEST_NO_BODY; r->status = HTTP_OK; r->status_line = NULL; r->the_request = NULL; r->used_path_info = AP_REQ_DEFAULT_PATH_INFO; /* * Install our own output filter for writing back headers in * CGI script style. */ ap_add_output_filter_handle(wsgi_header_filter_handle, NULL, r, r->connection); /* Create and install the WSGI request config. */ config = (WSGIRequestConfig *)apr_pcalloc(r->pool, sizeof(WSGIRequestConfig)); ap_set_module_config(r->request_config, &wsgi_module, (void *)config); /* Grab the socket from the connection core config. */ csd = ap_get_module_config(c->conn_config, &core_module); /* * Fake up parts of the internal per request core * configuration. If we don't do this then when Apache is * compiled with the symbol AP_DEBUG, internal checks made * by Apache will result in process crashing. */ req_cfg = apr_pcalloc(r->pool, sizeof(core_request_config)); req_cfg->bb = apr_brigade_create(r->pool, r->connection->bucket_alloc); ap_set_module_config(r->request_config, &core_module, req_cfg); /* Read in the request details and setup request object. */ if ((rv = wsgi_read_request(csd, r)) != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): Unable to read WSGI request.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } /* Check magic marker used to validate origin of request. 
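     *
     * This mirrors the hash computed in the Apache child process before
     * the request was proxied; conceptually:
     *
     *   key  = apr_psprintf(pool, "%ld|%s|%s|%s", group->random,
     *                       group->socket_path, SCRIPT_FILENAME,
     *                       mod_wsgi.handler_script);
     *   hash = ap_md5(pool, key);
     *
     * and the request is only accepted when that hash matches the
     * received "mod_wsgi.magic" value.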
*/ filename = apr_table_get(r->subprocess_env, "SCRIPT_FILENAME"); script = apr_table_get(r->subprocess_env, "mod_wsgi.handler_script"); magic = apr_table_get(r->subprocess_env, "mod_wsgi.magic"); if (!magic) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Request origin could not be " "validated.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } key = apr_psprintf(r->pool, "%ld|%s|%s|%s", wsgi_daemon_process->group->random, wsgi_daemon_process->group->socket_path, filename, script); hash = ap_md5(r->pool, (const unsigned char *)key); memset(key, '\0', strlen(key)); if (strcmp(magic, hash) != 0) { ap_log_error(APLOG_MARK, APLOG_ALERT, rv, wsgi_server, "mod_wsgi (pid=%d): Request origin could not be " "validated.", getpid()); apr_pool_destroy(p); return HTTP_INTERNAL_SERVER_ERROR; } apr_table_unset(r->subprocess_env, "mod_wsgi.magic"); /* * If we are executing in a chroot environment, we need to * adjust SCRIPT_FILENAME to remove leading portion of path * that corresponds to the location of the chroot directory. * Also need to adjust DOCUMENT_ROOT as well, although in * that case if it doesn't actually fall within the choot * directory, we just delete it outright as would be incorrect * if that directory lay outside of the chroot directory. */ if (wsgi_daemon_process->group->root) { const char *root; const char *path; root = wsgi_daemon_process->group->root; path = filename; if (strstr(path, root) == path && path[strlen(root)] == '/') { path += strlen(root); apr_table_set(r->subprocess_env, "SCRIPT_FILENAME", path); filename = path; } else { ap_log_error(APLOG_MARK, APLOG_CRIT, rv, wsgi_server, "mod_wsgi (pid=%d): WSGI script '%s' not located " "within chroot directory '%s'.", getpid(), path, root); return HTTP_INTERNAL_SERVER_ERROR; } path = (char *)apr_table_get(r->subprocess_env, "DOCUMENT_ROOT"); if (strstr(path, root) == path) { path += strlen(root); apr_table_set(r->subprocess_env, "DOCUMENT_ROOT", path); } else { apr_table_unset(r->subprocess_env, "DOCUMENT_ROOT"); } } r->filename = (char *)filename; /* Recalculate WSGI script or handler script modification time. */ if (script && *script) { if ((rv = apr_stat(&r->finfo, script, APR_FINFO_NORM, r->pool)) != APR_SUCCESS) { /* * Don't fail at this point. Allow the lack of file to * be detected later when trying to load the script file. */ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server, "mod_wsgi (pid=%d): Unable to stat target handler " "script '%s'.", getpid(), script); r->finfo.mtime = 0; } } else { if ((rv = apr_stat(&r->finfo, filename, APR_FINFO_NORM, r->pool)) != APR_SUCCESS) { /* * Don't fail at this point. Allow the lack of file to * be detected later when trying to load the script file. */ ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server, "mod_wsgi (pid=%d): Unable to stat target WSGI " "script '%s'.", getpid(), filename); r->finfo.mtime = 0; } } /* * Trigger mapping of host information to server configuration * so that when logging errors they go to the correct error log * file for the host. 
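     *
     * The lookup key used below is simply "host|port" built from the
     * mod_wsgi.listener_host and mod_wsgi.listener_port values passed in
     * the subprocess environment, e.g. "203.0.113.10|443" (hypothetical
     * address); the corresponding listener apr_sockaddr_t is reinstated
     * as c->local_addr so that ap_update_vhost_given_ip() can select the
     * correct virtual host for logging.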
*/ #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) r->connection->client_ip = (char *)apr_table_get(r->subprocess_env, "REMOTE_ADDR"); r->connection->client_addr->port = atoi(apr_table_get(r->subprocess_env, "REMOTE_PORT")); #else r->connection->remote_ip = (char *)apr_table_get(r->subprocess_env, "REMOTE_ADDR"); r->connection->remote_addr->port = atoi(apr_table_get(r->subprocess_env, "REMOTE_PORT")); #endif #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) r->useragent_addr = c->client_addr; r->useragent_ip = c->client_ip; #endif key = apr_psprintf(p, "%s|%s", apr_table_get(r->subprocess_env, "mod_wsgi.listener_host"), apr_table_get(r->subprocess_env, "mod_wsgi.listener_port")); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Server listener address '%s'.", getpid(), key); } addr = (apr_sockaddr_t *)apr_hash_get(wsgi_daemon_listeners, key, APR_HASH_KEY_STRING); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Server listener address '%s' was" "%s found.", getpid(), key, addr ? "" : " not"); } if (addr) { c->local_addr = addr; } ap_update_vhost_given_ip(r->connection); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Connection server matched was " "'%s|%d'.", getpid(), c->base_server->server_hostname, c->base_server->port); } r->server = c->base_server; if (apr_table_get(r->subprocess_env, "HTTP_HOST")) { apr_table_setn(r->headers_in, "Host", apr_table_get(r->subprocess_env, "HTTP_HOST")); } ap_update_vhost_from_headers(r); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Request server matched was '%s|%d'.", getpid(), r->server->server_hostname, r->server->port); } /* * Set content length of any request content and add the * standard HTTP input filter so that standard input routines * for request content will work. */ item = apr_table_get(r->subprocess_env, "CONTENT_LENGTH"); if (item) apr_table_setn(r->headers_in, "Content-Length", item); /* Set details of WSGI specific request config. 
*/ config->process_group = apr_table_get(r->subprocess_env, "mod_wsgi.process_group"); config->application_group = apr_table_get(r->subprocess_env, "mod_wsgi.application_group"); config->callable_object = apr_table_get(r->subprocess_env, "mod_wsgi.callable_object"); config->handler_script = apr_table_get(r->subprocess_env, "mod_wsgi.handler_script"); config->script_reloading = atoi(apr_table_get(r->subprocess_env, "mod_wsgi.script_reloading")); item = apr_table_get(r->subprocess_env, "mod_wsgi.enable_sendfile"); if (item && !strcasecmp(item, "1")) config->enable_sendfile = 1; else config->enable_sendfile = 0; item = apr_table_get(r->subprocess_env, "mod_wsgi.ignore_activity"); if (item && !strcasecmp(item, "1")) config->ignore_activity = 1; else config->ignore_activity = 0; config->daemon_connects = atoi(apr_table_get(r->subprocess_env, "mod_wsgi.daemon_connects")); config->daemon_restarts = atoi(apr_table_get(r->subprocess_env, "mod_wsgi.daemon_restarts")); item = apr_table_get(r->subprocess_env, "mod_wsgi.request_start"); if (item) { errno = 0; config->request_start = apr_strtoi64(item, (char **)&item, 10); if (!*item && errno != ERANGE) r->request_time = config->request_start; else config->request_start = 0.0; } item = apr_table_get(r->subprocess_env, "mod_wsgi.queue_start"); if (item) { errno = 0; config->queue_start = apr_strtoi64(item, (char **)&item, 10); if (!(!*item && errno != ERANGE)) config->queue_start = 0.0; } config->daemon_start = daemon_start; apr_table_setn(r->subprocess_env, "mod_wsgi.daemon_start", apr_psprintf(r->pool, "%" APR_TIME_T_FMT, config->daemon_start)); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) item = apr_table_get(r->subprocess_env, "mod_wsgi.request_id"); if (item) r->log_id = item; item = apr_table_get(r->subprocess_env, "mod_wsgi.connection_id"); if (item) r->connection->log_id = item; #endif /* * Install the standard HTTP input filter and set header for * chunked transfer encoding to force it to dechunk the input. * This is necessary as we chunk the data that is proxied to * the daemon processes so that we can determining whether we * actually receive all input or it was truncated. * * Note that the subprocess_env table that gets passed to the * WSGI environ dictionary has already been populated, so the * Transfer-Encoding header will not be passed in the WSGI * environ dictionary as a result of this. */ apr_table_setn(r->headers_in, "Transfer-Encoding", "chunked"); ap_add_input_filter("HTTP_IN", NULL, r, r->connection); /* Check for queue timeout. */ r->status = HTTP_OK; if (wsgi_daemon_process->group->queue_timeout) { if (config->request_start) { apr_time_t queue_time = 0; queue_time = config->daemon_start - config->request_start; if (queue_time > wsgi_daemon_process->group->queue_timeout) { queue_timeout_occurred = 1; r->status = HTTP_INTERNAL_SERVER_ERROR; r->status_line = "200 Timeout"; ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Queue timeout expired " "for WSGI daemon process '%s'.", getpid(), wsgi_daemon_process->group->name); } } } /* * Execute the actual target WSGI application. In * normal cases OK should always be returned. If * however an error occurs in importing or executing * the script or the Python code raises an exception * which is not caught and handled, then an internal * server error can be returned. As we don't want to * be triggering any error document handlers in the * daemon process we use a fake status line with 0 * as the status value. 
This will be picked up in * the Apache child process which will translate it * back to a 500 error so that normal error document * processing occurs. */ if (!queue_timeout_occurred) { if (wsgi_execute_script(r) != OK) { r->status = HTTP_INTERNAL_SERVER_ERROR; r->status_line = "200 Error"; } } /* * Ensure that request is finalised and any response * is flushed out. This will as a side effect read * any input data which wasn't consumed, thus * ensuring that the Apache child process isn't hung * waiting to send the request content and can * therefore process the response correctly. */ ap_finalize_request_protocol(r); bb = apr_brigade_create(r->pool, c->bucket_alloc); e = apr_bucket_flush_create(c->bucket_alloc); APR_BRIGADE_INSERT_HEAD(bb, e); ap_pass_brigade(r->connection->output_filters, bb); apr_pool_destroy(p); return OK; } #endif /* * Apache 2.X module initialisation functions. */ static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp, apr_pool_t *plog, server_rec *s) { void *data = NULL; const char *userdata_key; char package[128]; char interpreter[256]; int status = OK; /* * No longer support using mod_python at the same time as * mod_wsgi as becoming too painful to hack around * mod_python's broken usage of threading APIs when align * code to the stricter API requirements of Python 3.2. */ userdata_key = "python_init"; apr_pool_userdata_get(&data, userdata_key, s->process->pool); if (data) { ap_log_error(APLOG_MARK, APLOG_CRIT, 0, NULL, "mod_wsgi (pid=%d): The mod_python module can " "not be used in conjunction with mod_wsgi 4.0+. " "Remove the mod_python module from the Apache " "configuration.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Init function gets called twice during startup, we only * need to actually do anything on the second time it is * called. This avoids unecessarily initialising and then * destroying Python for no reason. We also though have to * deal with a special case when a graceful restart is done. * For that we are only called once, which is generally okay * as the 'wsgi_init' key will be set from initial start up * of the server. The exception to this is where the module * is only loaded into Apache when the server is already * running. In this case we have to detect that it is not * the initial startup, but a subsequent restart. We can do * this by looking at whether the scoreboard has been * initialised yet. That is probably enough, but to be safe, * also check what generation it is. */ userdata_key = "wsgi_init"; apr_pool_userdata_get(&data, userdata_key, s->process->pool); if (!data) { apr_pool_userdata_set((const void *)1, userdata_key, apr_pool_cleanup_null, s->process->pool); /* * Check for the special case of a graceful restart and * the module being loaded for the first time. In this * case we still go onto perform initialisation as the * initialisation routine for the module will not be * called a second time. */ if (!ap_scoreboard_image || ap_get_scoreboard_global()->running_generation == 0) { return OK; } } /* Setup module version information. */ sprintf(package, "mod_wsgi/%s", MOD_WSGI_VERSION_STRING); ap_add_version_component(pconf, package); /* Record Python version string with Apache. */ sprintf(interpreter, "Python/%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); ap_add_version_component(pconf, interpreter); /* Retain reference to base server. */ wsgi_server = s; /* Retain record of parent process ID. */ wsgi_parent_pid = getpid(); /* Determine whether multiprocess and/or multithread. 
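     *
     * For example, under the prefork MPM the AP_MPMQ_MAX_THREADS query
     * reports a single thread per process so wsgi_multithread ends up 0,
     * while the worker and event MPMs report more than one and it ends
     * up 1; wsgi_multiprocess is derived in the same way from
     * AP_MPMQ_MAX_DAEMONS.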
*/ ap_mpm_query(AP_MPMQ_IS_THREADED, &wsgi_multithread); if (wsgi_multithread != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &wsgi_multithread); wsgi_multithread = (wsgi_multithread != 1); } ap_mpm_query(AP_MPMQ_IS_FORKED, &wsgi_multiprocess); if (wsgi_multiprocess != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &wsgi_multiprocess); wsgi_multiprocess = (wsgi_multiprocess != 1); } /* Retain reference to main server config. */ wsgi_server_config = ap_get_module_config(s->module_config, &wsgi_module); /* * Check that the version of Python found at * runtime is what was used at compilation. * * XXX Can't do this as will cause Anaconda * Python to fail as not safe to call the * Py_GetVersion() function before one calls * the Py_Initialize() function when using * Anaconda Python. */ #if 0 wsgi_python_version(); #endif /* * Initialise Python if required to be done in * the parent process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (wsgi_python_required == -1) wsgi_python_required = 1; if (!wsgi_python_after_fork) wsgi_python_init(pconf); /* * Startup separate named daemon processes. This is * a bit tricky as we only want to do this after the * scoreboard has been created. On the initial server * startup though, this hook function is called prior * to the MPM being run, which means the scoreboard * hasn't been created yet. In that case we need to * defer process creation until after that, which we * can only do by hooking into the pre_mpm hook after * scoreboard creation has been done. On a server * restart, the scoreboard will be preserved, so we * can do it here, which is just as well as the pre_mpm * hook isn't run on a restart. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (!ap_scoreboard_image) { /* * Need to remember the pool we were given here as * the pre_mpm hook functions get given a different * pool which isn't the one we want and if we use * that then Apache will crash when it is being * shutdown. So our pre_mpm hook will use the pool * we have remembered here. */ wsgi_pconf_pool = pconf; ap_hook_pre_mpm(wsgi_deferred_start_daemons, NULL, NULL, APR_HOOK_REALLY_LAST); } else status = wsgi_start_daemons(pconf); #endif return status; } static void wsgi_hook_child_init(apr_pool_t *p, server_rec *s) { #if defined(MOD_WSGI_WITH_DAEMONS) WSGIProcessGroup *entries = NULL; WSGIProcessGroup *entry = NULL; int i; /* Close listener sockets for daemon processes. */ if (wsgi_daemon_list) { entries = (WSGIProcessGroup *)wsgi_daemon_list->elts; for (i = 0; i < wsgi_daemon_list->nelts; ++i) { entry = &entries[i]; if (entry->listener_fd != -1) { close(entry->listener_fd); entry->listener_fd = -1; } } } #endif /* Remember worker process ID. */ wsgi_worker_pid = getpid(); /* Time child process started waiting for requests. */ wsgi_restart_time = apr_time_now(); /* Create lock for request monitoring. */ apr_thread_mutex_create(&wsgi_monitor_lock, APR_THREAD_MUTEX_UNNESTED, p); if (wsgi_python_required) { /* * Initialise Python if required to be done in * the child process. Note that it will not be * initialised if mod_python loaded and it has * already been done. */ if (wsgi_python_after_fork) wsgi_python_init(p); /* * Now perform additional initialisation steps * always done in child process. 
*/ wsgi_python_child_init(p); } } #include "apr_lib.h" static char *wsgi_original_uri(request_rec *r) { char *first, *last; if (r->the_request == NULL) { return (char *) apr_pcalloc(r->pool, 1); } first = r->the_request; /* use the request-line */ while (*first && !apr_isspace(*first)) { ++first; /* skip over the method */ } while (apr_isspace(*first)) { ++first; /* and the space(s) */ } last = first; while (*last && !apr_isspace(*last)) { ++last; /* end at next whitespace */ } return apr_pstrmemdup(r->pool, first, last - first); } static int wsgi_http_invalid_header(const char *w) { char c; while ((c = *w++) != 0) { if (!apr_isalnum(c) && c != '-') return 1; } return 0; } static void wsgi_drop_invalid_headers(request_rec *r) { /* * Apache 2.2 when converting headers for CGI variables, doesn't * ignore headers with invalid names. That is, any which use any * characters besides alphanumerics and the '-' character. This * opens us up to header spoofing whereby something can inject * multiple headers which differ by using non alphanumeric * characters in the same position, which would then encode to same * value. Since not easy to cleanup after the fact, as a workaround, * is easier to simply remove the invalid headers. This will make * things end up being the same as Apache 2.4. Doing this could * annoy some users of Apache 2.2 who were using invalid headers, * but things will break for them under Apache 2.4 anyway. */ apr_array_header_t *to_delete = NULL; const apr_array_header_t *hdrs_arr; const apr_table_entry_t *hdrs; int i; hdrs_arr = apr_table_elts(r->headers_in); hdrs = (const apr_table_entry_t *) hdrs_arr->elts; for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } if (wsgi_http_invalid_header(hdrs[i].key)) { char **new; if (!to_delete) to_delete = apr_array_make(r->pool, 1, sizeof(char *)); new = (char **)apr_array_push(to_delete); *new = hdrs[i].key; } } if (to_delete) { char *key; for (i = 0; i < to_delete->nelts; i++) { key = ((char **)to_delete->elts)[i]; apr_table_unset(r->headers_in, key); } } } static const char *wsgi_proxy_client_headers[] = { "HTTP_X_FORWARDED_FOR", "HTTP_X_CLIENT_IP", "HTTP_X_REAL_IP", NULL, }; static const char *wsgi_proxy_scheme_headers[] = { "HTTP_X_FORWARDED_HTTPS", "HTTP_X_FORWARDED_PROTO", "HTTP_X_FORWARDED_SCHEME", "HTTP_X_FORWARDED_SSL", "HTTP_X_HTTPS", "HTTP_X_SCHEME", NULL, }; static const char *wsgi_proxy_host_headers[] = { "HTTP_X_FORWARDED_HOST", "HTTP_X_HOST", NULL, }; static const char *wsgi_proxy_script_name_headers[] = { "HTTP_X_SCRIPT_NAME", "HTTP_X_FORWARDED_SCRIPT_NAME", NULL, }; static int wsgi_ip_is_in_array(apr_sockaddr_t *client_ip, apr_array_header_t *proxy_ips) { int i; apr_ipsubnet_t **subs = (apr_ipsubnet_t **)proxy_ips->elts; for (i = 0; i < proxy_ips->nelts; i++) { if (apr_ipsubnet_test(subs[i], client_ip)) { return 1; } } return 0; } static void wsgi_process_forwarded_for(request_rec *r, WSGIRequestConfig *config, const char *value ) { if (config->trusted_proxies) { /* * A potentially comma separated list where client we are * interested in will be that immediately before the last * trusted proxy working from the end forwards. If there * are no trusted proxies then we use the last. */ apr_array_header_t *arr; arr = apr_array_make(r->pool, 3, sizeof(char *)); while (*value != '\0') { /* Skip leading whitespace for item. 
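     *
     * To illustrate the parsing done here, a header received as
     * (addresses hypothetical):
     *
     *   X-Forwarded-For: 203.0.113.7 , 10.0.0.1, 10.0.0.2
     *
     * is split into the items {"203.0.113.7", "10.0.0.1", "10.0.0.2"}
     * with surrounding whitespace trimmed, before the trusted proxy
     * evaluation below works backwards from the end of that list.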
*/ while (*value != '\0' && apr_isspace(*value)) value++; if (*value != '\0') { const char *end = NULL; const char *next = NULL; char **entry = NULL; end = value; while (*end != '\0' && *end != ',') end++; if (*end == '\0') next = end; else if (*end == ',') next = end+1; /* Need deal with trailing whitespace. */ while (end != value) { if (!apr_isspace(*(end-1))) break; end--; } entry = (char **)apr_array_push(arr); *entry = apr_pstrndup(r->pool, value, (end-value)); value = next; } } if (arr->nelts != 0) { /* HTTP_X_FORDWARDED_FOR wasn't just an empty string. */ char **items; int first = -1; int i; items = (char **)arr->elts; /* * Work out the position of the IP closest to the start * that we actually trusted. */ for (i=arr->nelts; i>0; ) { apr_sockaddr_t *sa; apr_status_t rv; i--; rv = apr_sockaddr_info_get(&sa, items[i], APR_UNSPEC, 0, 0, r->pool); if (rv == APR_SUCCESS) { if (!wsgi_ip_is_in_array(sa, config->trusted_proxies)) break; first = i; } else { ap_log_rerror(APLOG_MARK, APLOG_NOERRNO|APLOG_DEBUG, 0, r, "mod_wsgi (pid=%d): Forwarded IP of \"%s\" is " "not a valid IP address.", getpid(), items[i]); break; } } if (first >= 0) { /* * We found at least one trusted IP. We use the * IP that may have appeared before that as * REMOTE_ADDR. We rewrite HTTP_X_FORWARDED_FOR * to record only from REMOTE_ADDR onwards. */ char *list; i = first-1; if (i<0) i = 0; apr_table_setn(r->subprocess_env, "REMOTE_ADDR", items[i]); list = items[i]; i++; while (arr->nelts != i) { list = apr_pstrcat(r->pool, list, ", ", items[i], NULL); i++; } apr_table_setn(r->subprocess_env, "HTTP_X_FORWARDED_FOR", list); } else { /* * No trusted IP. Use the last for REMOTE_ADDR. * We rewrite HTTP_X_FORWARDED_FOR to record only * the last. */ apr_table_setn(r->subprocess_env, "REMOTE_ADDR", items[arr->nelts-1]); apr_table_setn(r->subprocess_env, "HTTP_X_FORWARDED_FOR", items[arr->nelts-1]); } } } else { /* * We do not need to validate the proxies. We will have a * potentially comma separated list where the client we * are interested in will be listed first. */ const char *end = NULL; /* Skip leading whitespace for item. */ while (*value != '\0' && apr_isspace(*value)) value++; if (*value != '\0') { end = value; while (*end != '\0' && *end != ',') end++; /* Need deal with trailing whitespace. */ while (end != value) { if (!apr_isspace(*(end-1))) break; end--; } /* Override REMOTE_ADDR. Leave HTTP_X_FORWARDED_FOR. */ apr_table_setn(r->subprocess_env, "REMOTE_ADDR", apr_pstrndup(r->pool, value, (end-value))); } } } static void wsgi_process_proxy_headers(request_rec *r) { WSGIRequestConfig *config = NULL; apr_array_header_t *trusted_proxy_headers = NULL; int match_client_header = 0; int match_host_header = 0; int match_script_name_header = 0; int match_scheme_header = 0; const char *trusted_client_header = NULL; const char *trusted_host_header = NULL; const char *trusted_script_name_header = NULL; const char *trusted_scheme_header = NULL; int i = 0; int trusted_proxy = 1; const char *client_ip = NULL; apr_status_t rv; config = (WSGIRequestConfig *)ap_get_module_config(r->request_config, &wsgi_module); trusted_proxy_headers = config->trusted_proxy_headers; /* Nothing to do if no trusted headers have been specified. */ if (!trusted_proxy_headers) return; /* * Check for any special processing required for each trusted * header which has been specified. We should only do this if * there was no list of trusted proxies, or if the client IP * was that of a trusted proxy. 
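     *
     * As a concrete (hypothetical) example, with trusted proxies set to
     * 10.0.0.0/8, a request whose REMOTE_ADDR is 10.1.2.3 has the listed
     * headers processed below, whereas one arriving directly from
     * 203.0.113.7 instead has every header in the matched categories
     * removed from the request environment further down.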
     */

    if (config->trusted_proxies) {
        client_ip = apr_table_get(r->subprocess_env, "REMOTE_ADDR");

        if (client_ip) {
            apr_sockaddr_t *sa;

            rv = apr_sockaddr_info_get(&sa, client_ip, APR_UNSPEC, 0, 0, r->pool);

            if (rv == APR_SUCCESS) {
                if (!wsgi_ip_is_in_array(sa, config->trusted_proxies))
                    trusted_proxy = 0;
            }
            else {
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r,
                              "mod_wsgi (pid=%d): REMOTE_ADDR of \"%s\" is "
                              "not a valid IP address.", getpid(), client_ip);

                trusted_proxy = 0;
            }
        }
        else
            trusted_proxy = 0;
    }

    if (trusted_proxy) {
        for (i=0; i<trusted_proxy_headers->nelts; i++) {
            const char *name;
            const char *value;

            name = ((const char**)trusted_proxy_headers->elts)[i];
            value = apr_table_get(r->subprocess_env, name);

            if (!strcmp(name, "HTTP_X_FORWARDED_FOR")) {
                match_client_header = 1;

                if (value) {
                    wsgi_process_forwarded_for(r, config, value);

                    trusted_client_header = name;
                }
            }
            else if (!strcmp(name, "HTTP_X_CLIENT_IP") ||
                     !strcmp(name, "HTTP_X_REAL_IP")) {
                match_client_header = 1;

                if (value) {
                    /* Use the value as is. */

                    apr_table_setn(r->subprocess_env, "REMOTE_ADDR", value);

                    trusted_client_header = name;
                }
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_HOST") ||
                     !strcmp(name, "HTTP_X_HOST")) {
                match_host_header = 1;

                if (value) {
                    /* Use the value as is. May include a port. */

                    trusted_host_header = name;

                    apr_table_setn(r->subprocess_env, "HTTP_HOST", value);
                }
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_SERVER")) {
                if (value) {
                    /* Use the value as is. */

                    apr_table_setn(r->subprocess_env, "SERVER_NAME", value);
                }
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_PORT")) {
                if (value) {
                    /* Use the value as is. */

                    apr_table_setn(r->subprocess_env, "SERVER_PORT", value);
                }
            }
            else if (!strcmp(name, "HTTP_X_SCRIPT_NAME") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SCRIPT_NAME")) {
                match_script_name_header = 1;

                if (value) {
                    /*
                     * Use the value as is. We want to remember what the
                     * original value for SCRIPT_NAME was though.
                     */

                    apr_table_setn(r->subprocess_env, "mod_wsgi.mount_point",
                                   value);

                    trusted_script_name_header = name;

                    apr_table_setn(r->subprocess_env, "SCRIPT_NAME", value);
                }
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_PROTO") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SCHEME") ||
                     !strcmp(name, "HTTP_X_SCHEME")) {
                match_scheme_header = 1;

                if (value) {
                    trusted_scheme_header = name;

                    /* Value can be either 'http' or 'https'. */

                    if (!strcasecmp(value, "https"))
                        apr_table_setn(r->subprocess_env, "HTTPS", "1");
                    else if (!strcasecmp(value, "http"))
                        apr_table_unset(r->subprocess_env, "HTTPS");
                }
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_HTTPS") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SSL") ||
                     !strcmp(name, "HTTP_X_HTTPS")) {
                match_scheme_header = 1;

                if (value) {
                    trusted_scheme_header = name;

                    /*
                     * Value can be a boolean like flag such as 'On',
                     * 'Off', 'true', 'false', '1' or '0'.
                     */

                    if (!strcasecmp(value, "On") ||
                        !strcasecmp(value, "true") ||
                        !strcasecmp(value, "1")) {
                        apr_table_setn(r->subprocess_env, "HTTPS", "1");
                    }
                    else if (!strcasecmp(value, "Off") ||
                             !strcasecmp(value, "false") ||
                             !strcasecmp(value, "0")) {
                        apr_table_unset(r->subprocess_env, "HTTPS");
                    }
                }
            }
        }
    }
    else {
        /*
         * If it isn't a trusted proxy, we still need to knock
         * out any headers for categories we were interested in.
         */

        for (i=0; i<trusted_proxy_headers->nelts; i++) {
            const char *name;

            name = ((const char**)trusted_proxy_headers->elts)[i];

            if (!strcmp(name, "HTTP_X_FORWARDED_FOR") ||
                !strcmp(name, "HTTP_X_CLIENT_IP") ||
                !strcmp(name, "HTTP_X_REAL_IP")) {
                match_client_header = 1;
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_HOST") ||
                     !strcmp(name, "HTTP_X_HOST")) {
                match_host_header = 1;
            }
            else if (!strcmp(name, "HTTP_X_SCRIPT_NAME") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SCRIPT_NAME")) {
                match_script_name_header = 1;
            }
            else if (!strcmp(name, "HTTP_X_FORWARDED_PROTO") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SCHEME") ||
                     !strcmp(name, "HTTP_X_SCHEME") ||
                     !strcmp(name, "HTTP_X_FORWARDED_HTTPS") ||
                     !strcmp(name, "HTTP_X_FORWARDED_SSL") ||
                     !strcmp(name, "HTTP_X_HTTPS")) {
                match_scheme_header = 1;
            }
        }
    }

    /*
     * Remove all client IP headers from request environment which
     * weren't matched as being trusted.
     */

    if (match_client_header) {
        const char *name = NULL;

        for (i=0; (name=wsgi_proxy_client_headers[i]); i++) {
            if (!trusted_client_header ||
                strcmp(name, trusted_client_header)) {
                apr_table_unset(r->subprocess_env, name);
            }
        }
    }

    /*
     * Remove all proxy scheme headers from request environment
     * which weren't matched as being trusted.
     */

    if (match_scheme_header) {
        const char *name = NULL;

        for (i=0; (name=wsgi_proxy_scheme_headers[i]); i++) {
            if (!trusted_scheme_header ||
                strcmp(name, trusted_scheme_header)) {
                apr_table_unset(r->subprocess_env, name);
            }
        }
    }

    /*
     * Remove all proxy host headers from request environment which
     * weren't matched as being trusted.
     */

    if (match_host_header) {
        const char *name = NULL;

        for (i=0; (name=wsgi_proxy_host_headers[i]); i++) {
            if (!trusted_host_header ||
                strcmp(name, trusted_host_header))
                apr_table_unset(r->subprocess_env, name);
        }
    }

    /*
     * Remove all proxy script name headers from request environment
     * which weren't matched as being trusted.
*/ if (match_script_name_header) { const char *name = NULL; for (i=0; (name=wsgi_proxy_script_name_headers[i]); i++) { if (!trusted_script_name_header || strcmp(name, trusted_script_name_header)) { apr_table_unset(r->subprocess_env, name); } } } } static char *wsgi_http2env(apr_pool_t *a, const char *w) { char *res = (char *)apr_palloc(a, sizeof("HTTP_") + strlen(w)); char *cp = res; char c; *cp++ = 'H'; *cp++ = 'T'; *cp++ = 'T'; *cp++ = 'P'; *cp++ = '_'; while ((c = *w++) != 0) { if (apr_isalnum(c)) { *cp++ = apr_toupper(c); } else if (c == '-') { *cp++ = '_'; } else return NULL; } *cp = 0; return res; } typedef struct { PyObject_HEAD request_rec *r; WSGIRequestConfig *config; PyObject *log; } AuthObject; static AuthObject *newAuthObject(request_rec *r, WSGIRequestConfig *config) { AuthObject *self; self = PyObject_New(AuthObject, &Auth_Type); if (self == NULL) return NULL; self->config = config; self->r = r; self->log = newLogObject(r, APLOG_ERR, NULL, 0); return self; } static void Auth_dealloc(AuthObject *self) { Py_DECREF(self->log); PyObject_Del(self); } static PyObject *Auth_environ(AuthObject *self, const char *group) { PyObject *vars; PyObject *object; request_rec *r = self->r; server_rec *s = r->server; conn_rec *c = r->connection; apr_port_t rport; const apr_array_header_t *hdrs_arr; const apr_table_entry_t *hdrs; const char *value = NULL; int i; vars = PyDict_New(); hdrs_arr = apr_table_elts(r->headers_in); hdrs = (const apr_table_entry_t *) hdrs_arr->elts; for (i = 0; i < hdrs_arr->nelts; ++i) { if (!hdrs[i].key) { continue; } if (!strcasecmp(hdrs[i].key, "Content-type")) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif PyDict_SetItemString(vars, "CONTENT_TYPE", object); Py_DECREF(object); } else if (!strcasecmp(hdrs[i].key, "Content-length")) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif PyDict_SetItemString(vars, "CONTENT_LENGTH", object); Py_DECREF(object); } else if (!strcasecmp(hdrs[i].key, "Authorization") || !strcasecmp(hdrs[i].key, "Proxy-Authorization")) { continue; } else { if (hdrs[i].val) { char *header = wsgi_http2env(r->pool, hdrs[i].key); if (header) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(hdrs[i].val, strlen(hdrs[i].val), NULL); #else object = PyString_FromString(hdrs[i].val); #endif PyDict_SetItemString(vars, header, object); Py_DECREF(object); } } } } value = ap_psignature("", r); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_SIGNATURE", object); Py_DECREF(object); #if AP_MODULE_MAGIC_AT_LEAST(20060905,0) value = ap_get_server_banner(); #else value = ap_get_server_version(); #endif #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_SOFTWARE", object); Py_DECREF(object); value = ap_escape_html(r->pool, ap_get_server_name(r)); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_NAME", object); Py_DECREF(object); if (r->connection->local_ip) { value = r->connection->local_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else 
object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_ADDR", object); Py_DECREF(object); } value = apr_psprintf(r->pool, "%u", ap_get_server_port(r)); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_PORT", object); Py_DECREF(object); value = ap_get_remote_host(c, r->per_dir_config, REMOTE_HOST, NULL); if (value) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_HOST", object); Py_DECREF(object); } #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if (r->useragent_ip) { value = r->useragent_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_ADDR", object); Py_DECREF(object); } #else if (c->remote_ip) { value = c->remote_ip; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_ADDR", object); Py_DECREF(object); } #endif #if PY_MAJOR_VERSION >= 3 value = ap_document_root(r); object = PyUnicode_Decode(value, strlen(value), Py_FileSystemDefaultEncoding, "surrogateescape"); #else object = PyString_FromString(ap_document_root(r)); #endif PyDict_SetItemString(vars, "DOCUMENT_ROOT", object); Py_DECREF(object); if (s->server_admin) { value = s->server_admin; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_ADMIN", object); Py_DECREF(object); } #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) rport = c->client_addr->port; value = apr_itoa(r->pool, rport); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_PORT", object); Py_DECREF(object); #else rport = c->remote_addr->port; value = apr_itoa(r->pool, rport); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REMOTE_PORT", object); Py_DECREF(object); #endif value = r->protocol; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SERVER_PROTOCOL", object); Py_DECREF(object); value = r->method; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REQUEST_METHOD", object); Py_DECREF(object); value = r->args ? r->args : ""; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "QUERY_STRING", object); Py_DECREF(object); value = wsgi_original_uri(r); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "REQUEST_URI", object); Py_DECREF(object); /* * XXX Apparently webdav does actually do modifications to * the uri and path_info attributes of request and they * could be used as part of authorisation. 
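     *
     * In the normal case below the split is done via ap_find_path_info(),
     * so with hypothetical values of "/app/report/2024" for r->uri and
     * "/report/2024" for r->path_info, the auth environment receives
     * SCRIPT_NAME of "/app" and PATH_INFO of "/report/2024".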
*/ if (!strcmp(r->protocol, "INCLUDED")) { value = r->uri; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SCRIPT_NAME", object); Py_DECREF(object); value = r->path_info ? r->path_info : ""; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "PATH_INFO", object); Py_DECREF(object); } else if (!r->path_info || !*r->path_info) { value = r->uri; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SCRIPT_NAME", object); Py_DECREF(object); value = ""; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "PATH_INFO", object); Py_DECREF(object); } else { int path_info_start = ap_find_path_info(r->uri, r->path_info); value = apr_pstrndup(r->pool, r->uri, path_info_start); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "SCRIPT_NAME", object); Py_DECREF(object); value = r->path_info ? r->path_info : ""; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else object = PyString_FromString(value); #endif PyDict_SetItemString(vars, "PATH_INFO", object); Py_DECREF(object); } object = Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, AP_SERVER_MINORVERSION_NUMBER, AP_SERVER_PATCHLEVEL_NUMBER); PyDict_SetItemString(vars, "apache.version", object); Py_DECREF(object); object = Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, MOD_WSGI_MINORVERSION_NUMBER, MOD_WSGI_MICROVERSION_NUMBER); PyDict_SetItemString(vars, "mod_wsgi.version", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString(""); #else object = PyString_FromString(""); #endif PyDict_SetItemString(vars, "mod_wsgi.process_group", object); Py_DECREF(object); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(group, strlen(group), NULL); #else object = PyString_FromString(group); #endif PyDict_SetItemString(vars, "mod_wsgi.application_group", object); Py_DECREF(object); object = PyLong_FromLong(self->config->script_reloading); PyDict_SetItemString(vars, "mod_wsgi.script_reloading", object); Py_DECREF(object); /* * Setup log object for WSGI errors. Don't decrement * reference to log object as keep reference to it. */ object = (PyObject *)self->log; PyDict_SetItemString(vars, "wsgi.errors", object); /* * If Apache extensions are enabled add a CObject reference * to the Apache request_rec structure instance. */ if (!wsgi_daemon_pool && self->config->pass_apache_request) { #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 2) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7) object = PyCapsule_New(self->r, 0, 0); #else object = PyCObject_FromVoidPtr(self->r, 0); #endif PyDict_SetItemString(vars, "apache.request_rec", object); Py_DECREF(object); } /* * Extensions for accessing SSL certificate information from * mod_ssl when in use. 
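     *
     * These are exposed so that an access/auth script can, for example,
     * call environ['mod_ssl.var_lookup']('SSL_CLIENT_S_DN') to obtain the
     * client certificate subject; the variable name shown is the standard
     * mod_ssl one and the call itself is purely illustrative. When mod_ssl
     * is not loaded the lookup simply returns None.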
*/ object = PyObject_GetAttrString((PyObject *)self, "ssl_is_https"); PyDict_SetItemString(vars, "mod_ssl.is_https", object); Py_DECREF(object); object = PyObject_GetAttrString((PyObject *)self, "ssl_var_lookup"); PyDict_SetItemString(vars, "mod_ssl.var_lookup", object); Py_DECREF(object); return vars; } static PyObject *Auth_ssl_is_https(AuthObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_is_https) *ssl_is_https = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, ":ssl_is_https")) return NULL; ssl_is_https = APR_RETRIEVE_OPTIONAL_FN(ssl_is_https); if (ssl_is_https == 0) return Py_BuildValue("i", 0); return Py_BuildValue("i", ssl_is_https(self->r->connection)); } static PyObject *Auth_ssl_var_lookup(AuthObject *self, PyObject *args) { APR_OPTIONAL_FN_TYPE(ssl_var_lookup) *ssl_var_lookup = 0; PyObject *item = NULL; PyObject *latin_item = NULL; char *name = 0; char *value = 0; if (!self->r) { PyErr_SetString(PyExc_RuntimeError, "request object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:ssl_var_lookup", &item)) return NULL; #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { PyErr_Format(PyExc_TypeError, "byte string value expected, " "value containing non 'latin-1' characters found"); return NULL; } item = latin_item; } #endif if (!PyString_Check(item)) { PyErr_Format(PyExc_TypeError, "byte string value expected, value " "of type %.200s found", item->ob_type->tp_name); Py_XDECREF(latin_item); return NULL; } name = PyString_AsString(item); ssl_var_lookup = APR_RETRIEVE_OPTIONAL_FN(ssl_var_lookup); if (ssl_var_lookup == 0) { Py_XDECREF(latin_item); Py_INCREF(Py_None); return Py_None; } value = ssl_var_lookup(self->r->pool, self->r->server, self->r->connection, self->r, name); Py_XDECREF(latin_item); if (!value) { Py_INCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 return PyUnicode_DecodeLatin1(value, strlen(value), NULL); #else return PyString_FromString(value); #endif } static PyMethodDef Auth_methods[] = { { "ssl_is_https", (PyCFunction)Auth_ssl_is_https, METH_VARARGS, 0 }, { "ssl_var_lookup", (PyCFunction)Auth_ssl_var_lookup, METH_VARARGS, 0 }, { NULL, NULL} }; static PyTypeObject Auth_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Auth", /*tp_name*/ sizeof(AuthObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Auth_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Auth_methods, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; #if defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static authn_status wsgi_check_password(request_rec *r, const char *user, const char *password) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; authn_status status; config = wsgi_create_req_config(r->pool, r); if 
(!config->auth_user_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI user " "authentication script not provided.", getpid()); return AUTH_GENERAL_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return AUTH_GENERAL_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS if (config->script_reloading) { Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS } #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS if (config->script_reloading) apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume an internal server error unless everything okay. */ status = AUTH_GENERAL_ERROR; /* Determine if script exists and execute it. 
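 *
 * Hedged illustration, not part of the original source: the script named
 * by WSGIAuthUserScript is expected to define a check_password() callable
 * matching the (environ, user, password) call below and returning True,
 * False, None or a user name, for example:
 *
 *   def check_password(environ, user, password):
 *       if user == 'spy':                  # example credentials only
 *           return password == 'secret'
 *       return None                        # user not known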
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "check_password"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, user, password); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { status = AUTH_USER_NOT_FOUND; } else if (result == Py_True) { status = AUTH_GRANTED; } else if (result == Py_False) { status = AUTH_DENIED; } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *str = NULL; str = PyUnicode_AsUTF8String(result); if (str) { adapter->r->user = apr_pstrdup(adapter->r->pool, PyString_AsString(str)); status = AUTH_GRANTED; } } #else else if (PyString_Check(result)) { adapter->r->user = apr_pstrdup(adapter->r->pool, PyString_AsString(result)); status = AUTH_GRANTED; } #endif else { PyErr_SetString(PyExc_TypeError, "Basic auth " "provider must return True, False " "None or user name as string"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { result = PyObject_CallObject(method, NULL); Py_XDECREF(result); } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Basic' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static authn_status wsgi_get_realm_hash(request_rec *r, const char *user, const char *realm, char **rethash) { WSGIRequestConfig *config; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; authn_status status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_user_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI user " "authentication script not provided.", getpid()); return AUTH_GENERAL_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
*/ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return AUTH_GENERAL_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume an internal server error unless everything okay. */ status = AUTH_GENERAL_ERROR; /* Determine if script exists and execute it. 
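 *
 * Hedged illustration, not part of the original source: for the 'Digest'
 * provider the same script defines get_realm_hash(), called below with
 * (environ, user, realm) and returning None or the MD5 digest of
 * 'user:realm:password' as a string, for example:
 *
 *   import hashlib
 *
 *   def get_realm_hash(environ, user, realm):
 *       if user != 'spy':                  # example user only
 *           return None
 *       value = hashlib.md5()
 *       value.update(('%s:%s:%s' % (user, realm, 'secret')).encode('UTF-8'))
 *       return value.hexdigest()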
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "get_realm_hash"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, user, realm); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { status = AUTH_USER_NOT_FOUND; } else if (PyString_Check(result)) { *rethash = PyString_AsString(result); *rethash = apr_pstrdup(r->pool, *rethash); status = AUTH_USER_FOUND; } #if PY_MAJOR_VERSION >= 3 else if (PyUnicode_Check(result)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(result); if (!latin_item) { PyErr_SetString(PyExc_TypeError, "Digest auth " "provider must return None " "or string object, value " "containing non 'latin-1' " "characters found"); } else { Py_DECREF(result); result = latin_item; *rethash = PyString_AsString(result); *rethash = apr_pstrdup(r->pool, *rethash); status = AUTH_USER_FOUND; } } #endif else { PyErr_SetString(PyExc_TypeError, "Digest auth " "provider must return None " "or string object"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); result = PyObject_CallObject(method, args); Py_XDECREF(result); Py_DECREF(args); } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Digest' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } static const authn_provider wsgi_authn_provider = { &wsgi_check_password, &wsgi_get_realm_hash }; #endif static int wsgi_groups_for_user(request_rec *r, WSGIRequestConfig *config, apr_table_t **grpstatus) { apr_table_t *grps = apr_table_make(r->pool, 15); InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; int status = HTTP_INTERNAL_SERVER_ERROR; if (!config->auth_group_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI group " "authentication script not provided.", getpid()); return HTTP_INTERNAL_SERVER_ERROR; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
*/ script = config->auth_group_script->handler_script; group = wsgi_server_group(r, config->auth_group_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume an internal server error unless everything okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. 
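 *
 * Hedged illustration, not part of the original source: the script named
 * by WSGIAuthGroupScript defines groups_for_user(), called below with
 * (environ, user) and returning an iterable of group names (byte strings,
 * or latin-1 encodable strings on Python 3), for example:
 *
 *   def groups_for_user(environ, user):
 *       if user == 'spy':                  # example group data only
 *           return ['secret-agents']
 *       return ['']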
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "groups_for_user"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Os)", vars, r->user); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { PyObject *iterator; iterator = PyObject_GetIter(result); if (iterator) { PyObject *item; const char *name; status = OK; while ((item = PyIter_Next(iterator))) { #if PY_MAJOR_VERSION >= 3 if (PyUnicode_Check(item)) { PyObject *latin_item; latin_item = PyUnicode_AsLatin1String(item); if (!latin_item) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): " "Groups for user returned " "from '%s' must be an " "iterable sequence of " "byte strings, value " "containing non 'latin-1' " "characters found", getpid(), script); Py_END_ALLOW_THREADS Py_DECREF(item); status = HTTP_INTERNAL_SERVER_ERROR; break; } else { Py_DECREF(item); item = latin_item; } } #endif if (!PyString_Check(item)) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Groups for " "user returned from '%s' must " "be an iterable sequence of " "byte strings.", getpid(), script); Py_END_ALLOW_THREADS Py_DECREF(item); status = HTTP_INTERNAL_SERVER_ERROR; break; } name = PyString_AsString(item); apr_table_setn(grps, apr_pstrdup(r->pool, name), "1"); Py_DECREF(item); } Py_DECREF(iterator); } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Groups for user " "returned from '%s' must be an iterable " "sequence of byte strings.", getpid(), script); Py_END_ALLOW_THREADS } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); result = PyObject_CallObject(method, args); Py_XDECREF(result); Py_DECREF(args); } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_XDECREF(method); /* No longer need adapter object. 
*/ Py_DECREF((PyObject *)adapter); } } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI group " "authentication script '%s' does not provide " "group provider.", getpid(), script); Py_END_ALLOW_THREADS } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); if (status == OK) *grpstatus = grps; return status; } static int wsgi_allow_access(request_rec *r, WSGIRequestConfig *config, const char *host) { InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; int allow = 0; if (!config->access_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI host " "access script not provided.", getpid()); return 0; } /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. */ script = config->access_script->handler_script; group = wsgi_server_group(r, config->access_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return 0; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume not allowed unless everything okay. */ allow = 0; /* Determine if script exists and execute it. 
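 *
 * Hedged illustration, not part of the original source: the script named
 * by WSGIAccessScript defines allow_access(), called below with
 * (environ, host) and returning True, False or None, where None defers
 * the decision to later Apache access checks, for example:
 *
 *   def allow_access(environ, host):
 *       return host in ['localhost', '127.0.0.1']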
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "allow_access"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oz)", vars, host); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { allow = -1; } else if (PyBool_Check(result)) { if (result == Py_True) allow = 1; } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Indicator of " "host accessibility returned from '%s' " "must a boolean or None.", getpid(), script); Py_END_ALLOW_THREADS } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { args = PyTuple_New(0); result = PyObject_CallObject(method, args); Py_XDECREF(result); Py_DECREF(args); } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_XDECREF(method); /* No longer need adapter object. */ Py_DECREF((PyObject *)adapter); } } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI host " "access script '%s' does not provide " "host validator.", getpid(), script); Py_END_ALLOW_THREADS } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return allow; } static int wsgi_hook_access_checker(request_rec *r) { WSGIRequestConfig *config; int allow = 0; const char *host = NULL; config = wsgi_create_req_config(r->pool, r); if (!config->access_script) return DECLINED; host = ap_get_remote_host(r->connection, r->per_dir_config, REMOTE_HOST, NULL); #if AP_MODULE_MAGIC_AT_LEAST(20111130,0) if (!host) host = r->useragent_ip; #else if (!host) host = r->connection->remote_ip; #endif allow = wsgi_allow_access(r, config, host); if (allow < 0) return DECLINED; else if (allow) return OK; if (ap_satisfies(r) != SATISFY_ANY || !ap_some_auth_required(r)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): " "Client denied by server configuration: '%s'.", getpid(), r->filename); } return HTTP_FORBIDDEN; } #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static int wsgi_hook_check_user_id(request_rec *r) { WSGIRequestConfig *config; int status = -1; const char *password; InterpreterObject *interp = NULL; PyObject *modules = NULL; PyObject *module = NULL; char *name = NULL; int exists = 0; const char *script; const char *group; if ((status = ap_get_basic_auth_pw(r, &password))) return status; config = wsgi_create_req_config(r->pool, r); if (!config->auth_user_script) return DECLINED; /* * Acquire the desired python interpreter. Once this is done * it is safe to start manipulating python objects. 
*/ script = config->auth_user_script->handler_script; group = wsgi_server_group(r, config->auth_user_script->application_group); interp = wsgi_acquire_interpreter(group); if (!interp) { ap_log_rerror(APLOG_MARK, APLOG_CRIT, 0, r, "mod_wsgi (pid=%d): Cannot acquire interpreter '%s'.", getpid(), group); return HTTP_INTERNAL_SERVER_ERROR; } /* Calculate the Python module name to be used for script. */ name = wsgi_module_name(r->pool, script); /* * Use a lock around the check to see if the module is * already loaded and the import of the module to prevent * two request handlers trying to import the module at the * same time. */ #if APR_HAS_THREADS Py_BEGIN_ALLOW_THREADS apr_thread_mutex_lock(wsgi_module_lock); Py_END_ALLOW_THREADS #endif modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, name); Py_XINCREF(module); if (module) exists = 1; /* * If script reloading is enabled and the module for it has * previously been loaded, see if it has been modified since * the last time it was accessed. */ if (module && config->script_reloading) { if (wsgi_reload_required(r->pool, r, script, module, NULL)) { /* * Script file has changed. Only support module * reloading for authentication scripts. Remove the * module from the modules dictionary before * reloading it again. If code is executing within * the module at the time, the callers reference * count on the module should ensure it isn't * actually destroyed until it is finished. */ Py_DECREF(module); module = NULL; PyDict_DelItemString(modules, name); } } if (!module) { module = wsgi_load_source(r->pool, r, name, exists, script, "", group, 0); } /* Safe now to release the module lock. */ #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_module_lock); #endif /* Log any details of exceptions if import failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Assume an internal server error unless everything okay. */ status = HTTP_INTERNAL_SERVER_ERROR; /* Determine if script exists and execute it. 
*/ if (module) { PyObject *module_dict = NULL; PyObject *object = NULL; module_dict = PyModule_GetDict(module); object = PyDict_GetItemString(module_dict, "check_password"); if (object) { PyObject *vars = NULL; PyObject *args = NULL; PyObject *result = NULL; PyObject *method = NULL; AuthObject *adapter = NULL; adapter = newAuthObject(r, config); if (adapter) { vars = Auth_environ(adapter, group); Py_INCREF(object); args = Py_BuildValue("(Oss)", vars, r->user, password); result = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); Py_DECREF(vars); if (result) { if (result == Py_None) { if (config->user_authoritative) { ap_note_basic_auth_failure(r); status = HTTP_UNAUTHORIZED; ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): User '%s' not " "found in executing authentication " "script '%s', for uri '%s'.", getpid(), r->user, script, r->uri); } else status = DECLINED; } else if (result == Py_True) { status = OK; } else if (result == Py_False) { ap_note_basic_auth_failure(r); status = HTTP_UNAUTHORIZED; ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Password mismatch " "for user '%s' in executing " "authentication script '%s', for uri " "'%s'.", getpid(), r->user, script, r->uri); } else { PyErr_SetString(PyExc_TypeError, "Basic auth " "provider must return True, False " "or None"); } Py_DECREF(result); } /* * Wipe out references to Apache request object * held by Python objects, so can detect when an * application holds on to the transient Python * objects beyond the life of the request and * thus raise an exception if they are used. */ adapter->r = NULL; /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); /* Close the log object so data is flushed. */ method = PyObject_GetAttrString(adapter->log, "close"); if (!method) { PyErr_Format(PyExc_AttributeError, "'%s' object has no attribute 'close'", adapter->log->ob_type->tp_name); } else { result = PyObject_CallObject(method, NULL); Py_XDECREF(result); } /* Log any details of exceptions if execution failed. */ if (PyErr_Occurred()) wsgi_log_python_error(r, NULL, script, 0); Py_XDECREF(method); /* No longer need adapter object. 
*/ Py_DECREF((PyObject *)adapter); } } else { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Target WSGI user " "authentication script '%s' does not provide " "'Basic' auth provider.", getpid(), script); Py_END_ALLOW_THREADS } } /* Cleanup and release interpreter, */ Py_XDECREF(module); wsgi_release_interpreter(interp); return status; } #endif #if defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) #if MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED static authz_status wsgi_check_authorization(request_rec *r, const char *require_args, const void *parsed_require_line) #else static authz_status wsgi_check_authorization(request_rec *r, const char *require_args) #endif { WSGIRequestConfig *config; apr_table_t *grpstatus = NULL; const char *t, *w; int status; #if AP_MODULE_MAGIC_AT_LEAST(20100714,0) if (!r->user) return AUTHZ_DENIED_NO_USER; #endif config = wsgi_create_req_config(r->pool, r); if (!config->auth_group_script) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Location of WSGI group " "authorization script not provided.", getpid()); return AUTHZ_DENIED; } status = wsgi_groups_for_user(r, config, &grpstatus); if (status != OK) return AUTHZ_DENIED; if (apr_table_elts(grpstatus)->nelts == 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. " "User is not a member of any groups.", getpid(), r->user, r->uri); return AUTHZ_DENIED; } t = require_args; while ((w = ap_getword_conf(r->pool, &t)) && w[0]) { if (apr_table_get(grpstatus, w)) { return AUTHZ_GRANTED; } } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. " "User is not a member of designated groups.", getpid(), r->user, r->uri); return AUTHZ_DENIED; } static const authz_provider wsgi_authz_provider = { &wsgi_check_authorization, #if MOD_WSGI_WITH_AUTHZ_PROVIDER_PARSED NULL, #endif }; #else static int wsgi_hook_auth_checker(request_rec *r) { WSGIRequestConfig *config; int m = r->method_number; const apr_array_header_t *reqs_arr; require_line *reqs; int required_group = 0; register int x; const char *t, *w; apr_table_t *grpstatus = NULL; char *reason = NULL; config = wsgi_create_req_config(r->pool, r); if (!config->auth_group_script) return DECLINED; reqs_arr = ap_requires(r); if (!reqs_arr) return DECLINED; reqs = (require_line *)reqs_arr->elts; for (x = 0; x < reqs_arr->nelts; x++) { if (!(reqs[x].method_mask & (AP_METHOD_BIT << m))) { continue; } t = reqs[x].requirement; w = ap_getword_white(r->pool, &t); #if AP_MODULE_MAGIC_AT_LEAST(20100714,0) if (!strcasecmp(w, "wsgi-group")) { #else if (!strcasecmp(w, "group") || !strcasecmp(w, "wsgi-group")) { #endif required_group = 1; if (!grpstatus) { int status; status = wsgi_groups_for_user(r, config, &grpstatus); if (status != OK) return status; if (apr_table_elts(grpstatus)->nelts == 0) { reason = "User is not a member of any groups"; break; } } while (t[0]) { w = ap_getword_conf(r->pool, &t); if (apr_table_get(grpstatus, w)) { return OK; } } } } if (!required_group || !config->group_authoritative) return DECLINED; ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): " "Authorization of user '%s' to access '%s' failed. %s.", getpid(), r->user, r->uri, reason ? 
reason : "User is not " "a member of designated groups"); ap_note_auth_failure(r); return HTTP_UNAUTHORIZED; } #endif APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *wsgi_logio_add_bytes_out; static void ap_logio_add_bytes_out(conn_rec *c, apr_off_t bytes) { if (!wsgi_daemon_pool && wsgi_logio_add_bytes_out) wsgi_logio_add_bytes_out(c, bytes); } static int wsgi_hook_logio(apr_pool_t *pconf, apr_pool_t *ptemp, apr_pool_t *plog, server_rec *s) { /* * This horrible fiddle is to insert a proxy function before * the normal ap_logio_add_bytes_out() function so that the * call to it can be disabled when mod_wsgi running in daemon * mode. If this is not done, then daemon process will crash * when mod_logio has been loaded. */ wsgi_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out); APR_REGISTER_OPTIONAL_FN(ap_logio_add_bytes_out); return OK; } static void wsgi_register_hooks(apr_pool_t *p) { static const char * const p1[] = { "mod_alias.c", NULL }; static const char * const n1[]= { "mod_userdir.c", "mod_vhost_alias.c", NULL }; static const char * const n2[] = { "core.c", NULL }; #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) static const char * const p3[] = { "mod_auth.c", NULL }; #endif #if !defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) static const char * const n4[] = { "mod_authz_user.c", NULL }; #endif static const char * const n5[] = { "mod_authz_host.c", NULL }; static const char * const p6[] = { "mod_python.c", NULL }; static const char * const p7[] = { "mod_ssl.c", NULL }; ap_hook_post_config(wsgi_hook_init, p6, NULL, APR_HOOK_MIDDLE); ap_hook_child_init(wsgi_hook_child_init, p6, NULL, APR_HOOK_MIDDLE); ap_hook_translate_name(wsgi_hook_intercept, p1, n1, APR_HOOK_MIDDLE); ap_hook_handler(wsgi_hook_handler, NULL, NULL, APR_HOOK_MIDDLE); #if defined(MOD_WSGI_WITH_DAEMONS) ap_hook_post_config(wsgi_hook_logio, NULL, n2, APR_HOOK_REALLY_FIRST); wsgi_header_filter_handle = ap_register_output_filter("WSGI_HEADER", wsgi_header_filter, NULL, AP_FTYPE_PROTOCOL); #endif #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) ap_hook_check_user_id(wsgi_hook_check_user_id, p3, NULL, APR_HOOK_MIDDLE); #else ap_register_provider(p, AUTHN_PROVIDER_GROUP, "wsgi", AUTHN_PROVIDER_VERSION, &wsgi_authn_provider); #endif #if !defined(MOD_WSGI_WITH_AUTHZ_PROVIDER) ap_hook_auth_checker(wsgi_hook_auth_checker, NULL, n4, APR_HOOK_MIDDLE); #else ap_register_provider(p, AUTHZ_PROVIDER_GROUP, "wsgi-group", AUTHZ_PROVIDER_VERSION, &wsgi_authz_provider); #endif ap_hook_access_checker(wsgi_hook_access_checker, p7, n5, APR_HOOK_MIDDLE); } static const command_rec wsgi_commands[] = { AP_INIT_RAW_ARGS("WSGIScriptAlias", wsgi_add_script_alias, NULL, RSRC_CONF, "Map location to target WSGI script file."), AP_INIT_RAW_ARGS("WSGIScriptAliasMatch", wsgi_add_script_alias, "*", RSRC_CONF, "Map location pattern to target WSGI script file."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_RAW_ARGS("WSGIDaemonProcess", wsgi_add_daemon_process, NULL, RSRC_CONF, "Specify details of daemon processes to start."), AP_INIT_TAKE1("WSGISocketPrefix", wsgi_set_socket_prefix, NULL, RSRC_CONF, "Path prefix for the daemon process sockets."), AP_INIT_TAKE1("WSGISocketRotation", wsgi_set_socket_rotation, NULL, RSRC_CONF, "Enable/Disable rotation of daemon process sockets."), AP_INIT_TAKE1("WSGIAcceptMutex", wsgi_set_accept_mutex, NULL, RSRC_CONF, "Set accept mutex type for daemon processes."), AP_INIT_TAKE1("WSGILazyInitialization", wsgi_set_lazy_initialization, NULL, RSRC_CONF, "Enable/Disable lazy Python initialization."), #endif 
AP_INIT_TAKE1("WSGIVerboseDebugging", wsgi_set_verbose_debugging, NULL, RSRC_CONF, "Enable/Disable verbose debugging messages."), #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 AP_INIT_TAKE1("WSGIPy3kWarningFlag", wsgi_set_py3k_warning_flag, NULL, RSRC_CONF, "Enable/Disable Python 3.0 warnings."), #endif #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6) AP_INIT_TAKE1("WSGIDontWriteBytecode", wsgi_set_dont_write_bytecode, NULL, RSRC_CONF, "Enable/Disable writing of byte code."), #endif AP_INIT_TAKE1("WSGIPythonWarnings", wsgi_add_python_warnings, NULL, RSRC_CONF, "Control Python warning messages."), AP_INIT_TAKE1("WSGIPythonOptimize", wsgi_set_python_optimize, NULL, RSRC_CONF, "Set level of Python compiler optimisations."), AP_INIT_TAKE1("WSGIPythonHome", wsgi_set_python_home, NULL, RSRC_CONF, "Python prefix/exec_prefix absolute path names."), AP_INIT_TAKE1("WSGIPythonPath", wsgi_set_python_path, NULL, RSRC_CONF, "Python module search path."), AP_INIT_TAKE1("WSGIPythonEggs", wsgi_set_python_eggs, NULL, RSRC_CONF, "Python eggs cache directory."), AP_INIT_TAKE1("WSGIPythonHashSeed", wsgi_set_python_hash_seed, NULL, RSRC_CONF, "Python hash seed."), AP_INIT_TAKE1("WSGIDestroyInterpreter", wsgi_set_destroy_interpreter, NULL, RSRC_CONF, "Enable/Disable destruction of Python interpreter."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_TAKE1("WSGIRestrictEmbedded", wsgi_set_restrict_embedded, NULL, RSRC_CONF, "Enable/Disable use of embedded mode."), #endif AP_INIT_TAKE1("WSGIRestrictStdin", wsgi_set_restrict_stdin, NULL, RSRC_CONF, "Enable/Disable restrictions on use of STDIN."), AP_INIT_TAKE1("WSGIRestrictStdout", wsgi_set_restrict_stdout, NULL, RSRC_CONF, "Enable/Disable restrictions on use of STDOUT."), AP_INIT_TAKE1("WSGIRestrictSignal", wsgi_set_restrict_signal, NULL, RSRC_CONF, "Enable/Disable restrictions on use of signal()."), AP_INIT_TAKE1("WSGICaseSensitivity", wsgi_set_case_sensitivity, NULL, RSRC_CONF, "Define whether file system is case sensitive."), #if defined(MOD_WSGI_WITH_DAEMONS) AP_INIT_RAW_ARGS("WSGIRestrictProcess", wsgi_set_restrict_process, NULL, ACCESS_CONF|RSRC_CONF, "Limit selectable WSGI process groups."), AP_INIT_TAKE1("WSGIProcessGroup", wsgi_set_process_group, NULL, ACCESS_CONF|RSRC_CONF, "Name of the WSGI process group."), #endif AP_INIT_TAKE1("WSGIApplicationGroup", wsgi_set_application_group, NULL, ACCESS_CONF|RSRC_CONF, "Application interpreter group."), AP_INIT_TAKE1("WSGICallableObject", wsgi_set_callable_object, NULL, OR_FILEINFO, "Name of entry point in WSGI script file."), AP_INIT_RAW_ARGS("WSGIImportScript", wsgi_add_import_script, NULL, RSRC_CONF, "Location of WSGI import script."), AP_INIT_RAW_ARGS("WSGIDispatchScript", wsgi_set_dispatch_script, NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI dispatch script."), AP_INIT_TAKE1("WSGIPassApacheRequest", wsgi_set_pass_apache_request, NULL, ACCESS_CONF|RSRC_CONF, "Enable/Disable Apache request object."), AP_INIT_TAKE1("WSGIPassAuthorization", wsgi_set_pass_authorization, NULL, OR_FILEINFO, "Enable/Disable WSGI authorization."), AP_INIT_TAKE1("WSGIScriptReloading", wsgi_set_script_reloading, NULL, OR_FILEINFO, "Enable/Disable script reloading mechanism."), AP_INIT_TAKE1("WSGIErrorOverride", wsgi_set_error_override, NULL, OR_FILEINFO, "Enable/Disable overriding of error pages."), AP_INIT_TAKE1("WSGIChunkedRequest", wsgi_set_chunked_request, NULL, OR_FILEINFO, "Enable/Disable support for chunked requests."), AP_INIT_TAKE1("WSGIMapHEADToGET", 
wsgi_set_map_head_to_get, NULL, OR_FILEINFO, "Enable/Disable mapping of HEAD to GET."), AP_INIT_TAKE1("WSGIIgnoreActivity", wsgi_set_ignore_activity, NULL, OR_FILEINFO, "Enable/Disable reset of inactvity timeout."), AP_INIT_RAW_ARGS("WSGITrustedProxyHeaders", wsgi_set_trusted_proxy_headers, NULL, OR_FILEINFO, "Specify a list of trusted proxy headers."), AP_INIT_RAW_ARGS("WSGITrustedProxies", wsgi_set_trusted_proxies, NULL, OR_FILEINFO, "Specify a list of trusted proxies."), #ifndef WIN32 AP_INIT_TAKE1("WSGIEnableSendfile", wsgi_set_enable_sendfile, NULL, OR_FILEINFO, "Enable/Disable support for kernel sendfile."), #endif AP_INIT_RAW_ARGS("WSGIAccessScript", wsgi_set_access_script, NULL, OR_AUTHCFG, "Location of WSGI host access script file."), AP_INIT_RAW_ARGS("WSGIAuthUserScript", wsgi_set_auth_user_script, NULL, OR_AUTHCFG, "Location of WSGI user auth script file."), AP_INIT_RAW_ARGS("WSGIAuthGroupScript", wsgi_set_auth_group_script, NULL, OR_AUTHCFG, "Location of WSGI group auth script file."), #if !defined(MOD_WSGI_WITH_AUTHN_PROVIDER) AP_INIT_TAKE1("WSGIUserAuthoritative", wsgi_set_user_authoritative, NULL, OR_AUTHCFG, "Enable/Disable as being authoritative on users."), #endif AP_INIT_TAKE1("WSGIGroupAuthoritative", wsgi_set_group_authoritative, NULL, OR_AUTHCFG, "Enable/Disable as being authoritative on groups."), AP_INIT_RAW_ARGS("WSGIHandlerScript", wsgi_add_handler_script, NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI handler script file."), AP_INIT_TAKE1("WSGIServerMetrics", wsgi_set_server_metrics, NULL, RSRC_CONF, "Enabled/Disable access to server metrics."), AP_INIT_TAKE1("WSGINewRelicConfigFile", wsgi_set_newrelic_config_file, NULL, RSRC_CONF, "New Relic monitoring agent configuration file."), AP_INIT_TAKE1("WSGINewRelicEnvironment", wsgi_set_newrelic_environment, NULL, RSRC_CONF, "New Relic monitoring agent environment."), { NULL } }; /* Dispatch list for API hooks */ module AP_MODULE_DECLARE_DATA wsgi_module = { STANDARD20_MODULE_STUFF, wsgi_create_dir_config, /* create per-dir config structures */ wsgi_merge_dir_config, /* merge per-dir config structures */ wsgi_create_server_config, /* create per-server config structures */ wsgi_merge_server_config, /* merge per-server config structures */ wsgi_commands, /* table of config file commands */ wsgi_register_hooks /* register hooks */ }; /* ------------------------------------------------------------------------- */ #if defined(_WIN32) #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initmod_wsgi(void) { } #else PyMODINIT_FUNC PyInit_mod_wsgi(void) { return NULL; } #endif #endif /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_apache.c000066400000000000000000000130751452636074700200630ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_apache.h" #include "wsgi_daemon.h" /* ------------------------------------------------------------------------- */ /* * This is to shut up ranlib when run on empty object files as it confuses * users sometimes who then think it is an error and something is wrong. */ int wsgi_apache_dummy = 1; /* ------------------------------------------------------------------------- */ #if defined(MOD_WSGI_WITH_DAEMONS) #if !AP_MODULE_MAGIC_AT_LEAST(20051115,0) void wsgi_ap_close_listeners(void) { ap_listen_rec *lr; for (lr = ap_listeners; lr; lr = lr->next) { apr_socket_close(lr->sd); lr->active = 0; } } #endif /* ------------------------------------------------------------------------- */ #if !AP_MODULE_MAGIC_AT_LEAST(20101106,1) apr_status_t wsgi_ap_pool_cleanup_set_null(void *data_) { void **ptr = (void **)data_; *ptr = NULL; return APR_SUCCESS; } #endif /* ------------------------------------------------------------------------- */ #if (APR_MAJOR_VERSION == 0) && \ (APR_MINOR_VERSION == 9) && \ (APR_PATCH_VERSION < 5) #define apr_unix_file_cleanup wsgi_apr_unix_file_cleanup apr_status_t wsgi_apr_unix_file_cleanup(void *thefile) { apr_file_t *file = thefile; return apr_file_close(file); } #endif /* ------------------------------------------------------------------------- */ #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) apr_status_t wsgi_apr_os_pipe_put_ex(apr_file_t **file, apr_os_file_t *thefile, int register_cleanup, apr_pool_t *pool) { apr_status_t rv; rv = apr_os_pipe_put(file, thefile, pool); if (register_cleanup) { apr_pool_cleanup_register(pool, (void *)(*file), apr_unix_file_cleanup, apr_pool_cleanup_null); } return rv; } #endif #endif /* ------------------------------------------------------------------------- */ #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) APR_DECLARE(apr_status_t) apr_conv_utf8_to_ucs2(const char *in, apr_size_t *inbytes, apr_wchar_t *out, apr_size_t *outwords); apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr, apr_size_t retlen, const char* srcstr) { /* TODO: The computations could preconvert the string to determine * the true size of the retstr, but that's a memory over speed * tradeoff that isn't appropriate this early in development. * * Allocate the maximum string length based on leading 4 * characters of \\?\ (allowing nearly unlimited path lengths) * plus the trailing null, then transform /'s into \\'s since * the \\?\ form doesn't allow '/' path seperators. * * Note that the \\?\ form only works for local drive paths, and * \\?\UNC\ is needed UNC paths. */ apr_size_t srcremains = strlen(srcstr) + 1; apr_wchar_t *t = retstr; apr_status_t rv; /* This is correct, we don't twist the filename if it is will * definately be shorter than 248 characters. It merits some * performance testing to see if this has any effect, but there * seem to be applications that get confused by the resulting * Unicode \\?\ style file names, especially if they use argv[0] * or call the Win32 API functions such as GetModuleName, etc. * Not every application is prepared to handle such names. * * Note also this is shorter than MAX_PATH, as directory paths * are actually limited to 248 characters. * * Note that a utf-8 name can never result in more wide chars * than the original number of utf-8 narrow chars. 
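 *
 * Hedged illustration, not part of the original source: given the rules
 * below, a long path is rewritten for example as
 *
 *   C:/some/very/long/path.py  ->  \\?\C:\some\very\long\path.py
 *   //server/share/path.py     ->  \\?\UNC\server\share\path.py
 *
 * while paths shorter than 248 characters are converted to wide
 * characters without the \\?\ prefix (slashes still become backslashes).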
*/ if (srcremains > 248) { if (srcstr[1] == ':' && (srcstr[2] == '/' || srcstr[2] == '\\')) { wcscpy (retstr, L"\\\\?\\"); retlen -= 4; t += 4; } else if ((srcstr[0] == '/' || srcstr[0] == '\\') && (srcstr[1] == '/' || srcstr[1] == '\\') && (srcstr[2] != '?')) { /* Skip the slashes */ srcstr += 2; srcremains -= 2; wcscpy (retstr, L"\\\\?\\UNC\\"); retlen -= 8; t += 8; } } if (rv = apr_conv_utf8_to_ucs2(srcstr, &srcremains, t, &retlen)) { return (rv == APR_INCOMPLETE) ? APR_EINVAL : rv; } if (srcremains) { return APR_ENAMETOOLONG; } for (; *t; ++t) if (*t == L'/') *t = L'\\'; return APR_SUCCESS; } #endif /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_apache.h000066400000000000000000000103561452636074700200670ustar00rootroot00000000000000#ifndef WSGI_APACHE_H #define WSGI_APACHE_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ /* * Enabled access to Apache private API and data structures. Need to do * this to access the following: * * In Apache 2.X need access to ap_create_request_config(). * * In Apache 2.X need access to core_module and core_request_config. * */ /* ------------------------------------------------------------------------- */ #define CORE_PRIVATE 1 #if defined(_WIN32) #include #endif #include "httpd.h" #if !defined(HTTPD_ROOT) #error Sorry, Apache developer package does not appear to be installed. #endif #if !defined(AP_SERVER_MAJORVERSION_NUMBER) #if AP_MODULE_MAGIC_AT_LEAST(20010224,0) #define AP_SERVER_MAJORVERSION_NUMBER 2 #define AP_SERVER_MINORVERSION_NUMBER 0 #define AP_SERVER_PATCHLEVEL_NUMBER 0 #else #define AP_SERVER_MAJORVERSION_NUMBER 1 #define AP_SERVER_MINORVERSION_NUMBER 3 #define AP_SERVER_PATCHLEVEL_NUMBER 0 #endif #endif #if !defined(AP_SERVER_BASEVERSION) #define AP_SERVER_BASEVERSION SERVER_BASEVERSION #endif #if AP_SERVER_MAJORVERSION_NUMBER < 2 #error Sorry, mod_wsgi 4.0+ requires Apache 2.0+. 
#endif #include "apr_lib.h" #include "ap_mpm.h" #include "ap_compat.h" #include "apr_tables.h" #include "apr_strings.h" #include "http_config.h" #include "ap_listen.h" #include "apr_version.h" #include "apr_buckets.h" #include "apr_date.h" #include "mpm_common.h" #include "apr_optional.h" APR_DECLARE_OPTIONAL_FN(int, ssl_is_https, (conn_rec *)); APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, (apr_pool_t *, server_rec *, conn_rec *, request_rec *, char *)); #include "ap_config.h" #include "http_core.h" #include "http_log.h" #include "http_main.h" #include "http_protocol.h" #include "http_request.h" #include "util_script.h" #include "util_md5.h" #include "mpm_common.h" #include "scoreboard.h" #ifdef APLOG_USE_MODULE APLOG_USE_MODULE(wsgi); #endif #ifndef APR_FPROT_GWRITE #define APR_FPROT_GWRITE APR_GWRITE #endif #ifndef APR_FPROT_WWRITE #define APR_FPROT_WWRITE APR_WWRITE #endif #ifndef MPM_NAME #define MPM_NAME ap_show_mpm() #endif #if !AP_MODULE_MAGIC_AT_LEAST(20050127,0) /* Debian backported ap_regex_t to Apache 2.0 and * thus made official version checking break. */ #ifndef AP_REG_EXTENDED typedef regex_t ap_regex_t; typedef regmatch_t ap_regmatch_t; #define AP_REG_EXTENDED REG_EXTENDED #endif #endif #if !AP_MODULE_MAGIC_AT_LEAST(20081201,0) #define ap_unixd_config unixd_config #endif #if !AP_MODULE_MAGIC_AT_LEAST(20051115,0) extern void wsgi_ap_close_listeners(void); #define ap_close_listeners wsgi_ap_close_listeners #endif #if !AP_MODULE_MAGIC_AT_LEAST(20101106,1) extern apr_status_t wsgi_ap_pool_cleanup_set_null(void *); #define ap_pool_cleanup_set_null wsgi_ap_pool_cleanup_set_null #endif #if (APR_MAJOR_VERSION == 0) && \ (APR_MINOR_VERSION == 9) && \ (APR_PATCH_VERSION < 5) extern apr_status_t wsgi_apr_unix_file_cleanup(void *); extern apr_status_t wsgi_apr_os_pipe_put_ex(apr_file_t **, apr_os_file_t *, int, apr_pool_t *); #define apr_unix_file_cleanup wsgi_apr_unix_file_cleanup #define apr_os_pipe_put_ex wsgi_apr_os_pipe_put_ex #endif #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) typedef apr_uint16_t apr_wchar_t; extern apr_status_t wsgi_utf8_to_unicode_path(apr_wchar_t* retstr, apr_size_t retlen, const char* srcstr); #endif /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_buckets.c000066400000000000000000000112511452636074700202740ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_buckets.h" #include "wsgi_interp.h" /* ------------------------------------------------------------------------- */ typedef struct { apr_bucket_refcount refcount; char *base; const char *application_group; PyObject *string_object; int decref_string; } wsgi_apr_bucket_python; /* ------------------------------------------------------------------------- */ static void wsgi_python_bucket_destroy(void *data) { wsgi_apr_bucket_python *h = data; if (apr_bucket_shared_destroy(h)) { if (h->decref_string) { InterpreterObject *interp = NULL; interp = wsgi_acquire_interpreter(h->application_group); Py_DECREF(h->string_object); wsgi_release_interpreter(interp); } apr_bucket_free(h); } } /* ------------------------------------------------------------------------- */ static apr_status_t wsgi_python_bucket_read(apr_bucket *b, const char **str, apr_size_t *len, apr_read_type_e block) { wsgi_apr_bucket_python *h = b->data; *str = h->base + b->start; *len = b->length; return APR_SUCCESS; } /* ------------------------------------------------------------------------- */ static apr_bucket *wsgi_apr_bucket_python_make(apr_bucket *b, const char *buf, apr_size_t length, const char *application_group, PyObject *string_object, int decref_string ) { wsgi_apr_bucket_python *h; h = apr_bucket_alloc(sizeof(*h), b->list); h->base = (char *)buf; h->application_group = application_group; h->string_object = string_object; h->decref_string = decref_string; b = apr_bucket_shared_make(b, h, 0, length); b->type = &wsgi_apr_bucket_type_python; return b; } /* ------------------------------------------------------------------------- */ apr_bucket *wsgi_apr_bucket_python_create(const char *buf, apr_size_t length, const char *application_group, PyObject *string_object, apr_bucket_alloc_t *list) { apr_bucket *b = apr_bucket_alloc(sizeof(*b), list); APR_BUCKET_INIT(b); b->free = apr_bucket_free; b->list = list; return wsgi_apr_bucket_python_make(b, buf, length, application_group, string_object, 0); } /* ------------------------------------------------------------------------- */ static apr_status_t wsgi_python_bucket_setaside(apr_bucket *b, apr_pool_t *p) { wsgi_apr_bucket_python *h = b->data; if (h->decref_string) { /* * XXX Not sure if this is correct. Can't assume that if doing * a set aside of a bucket which was already set aside that * we aren't still in context of active interpreter. 
*/ InterpreterObject *interp = NULL; interp = wsgi_acquire_interpreter(h->application_group); Py_INCREF(h->string_object); wsgi_release_interpreter(interp); } else { Py_INCREF(h->string_object); } wsgi_apr_bucket_python_make(b, (char *)h->base + b->start, b->length, h->application_group, h->string_object, 1); return APR_SUCCESS; } /* ------------------------------------------------------------------------- */ const apr_bucket_type_t wsgi_apr_bucket_type_python = { "PYTHON", 5, APR_BUCKET_DATA, wsgi_python_bucket_destroy, wsgi_python_bucket_read, wsgi_python_bucket_setaside, apr_bucket_shared_split, apr_bucket_shared_copy }; /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_buckets.h000066400000000000000000000026101452636074700203000ustar00rootroot00000000000000#ifndef WSGI_BUCKETS_H #define WSGI_BUCKETS_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ extern const apr_bucket_type_t wsgi_apr_bucket_type_python; apr_bucket *wsgi_apr_bucket_python_create(const char *buf, apr_size_t length, const char *application_group, PyObject *string_object, apr_bucket_alloc_t *list); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_convert.c000066400000000000000000000105771452636074700203260ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_convert.h" #include "wsgi_validate.h" /* ------------------------------------------------------------------------- */ PyObject *wsgi_convert_string_to_bytes(PyObject *value) { PyObject *result = NULL; #if PY_MAJOR_VERSION >= 3 if (!PyUnicode_Check(value)) { PyErr_Format(PyExc_TypeError, "expected unicode object, value " "of type %.200s found", value->ob_type->tp_name); return NULL; } result = PyUnicode_AsLatin1String(value); if (!result) { PyErr_SetString(PyExc_ValueError, "unicode object contains non " "latin-1 characters"); return NULL; } #else if (!PyBytes_Check(value)) { PyErr_Format(PyExc_TypeError, "expected byte string object, " "value of type %.200s found", value->ob_type->tp_name); return NULL; } Py_INCREF(value); result = value; #endif return result; } /* ------------------------------------------------------------------------- */ PyObject *wsgi_convert_status_line_to_bytes(PyObject *status_line) { PyObject *result = NULL; result = wsgi_convert_string_to_bytes(status_line); if (!result) return NULL; if (!wsgi_validate_status_line(result)) { Py_DECREF(result); return NULL; } return result; } /* ------------------------------------------------------------------------- */ PyObject *wsgi_convert_headers_to_bytes(PyObject *headers) { PyObject *result = NULL; int i; long size; if (!PyList_Check(headers)) { PyErr_Format(PyExc_TypeError, "expected list object for headers, " "value of type %.200s found", headers->ob_type->tp_name); return 0; } size = PyList_Size(headers); result = PyList_New(size); for (i = 0; i < size; i++) { PyObject *header = NULL; PyObject *header_name = NULL; PyObject *header_value = NULL; PyObject *header_name_as_bytes = NULL; PyObject *header_value_as_bytes = NULL; PyObject *result_tuple = NULL; header = PyList_GetItem(headers, i); if (!PyTuple_Check(header)) { PyErr_Format(PyExc_TypeError, "list of tuple values " "expected for headers, value of type %.200s found", header->ob_type->tp_name); Py_DECREF(result); return 0; } if (PyTuple_Size(header) != 2) { PyErr_Format(PyExc_ValueError, "tuple of length 2 " "expected for header, length is %d", (int)PyTuple_Size(header)); Py_DECREF(result); return 0; } result_tuple = PyTuple_New(2); PyList_SET_ITEM(result, i, result_tuple); header_name = PyTuple_GetItem(header, 0); header_value = PyTuple_GetItem(header, 1); header_name_as_bytes = wsgi_convert_string_to_bytes(header_name); if (!header_name_as_bytes) goto failure; PyTuple_SET_ITEM(result_tuple, 0, header_name_as_bytes); if (!wsgi_validate_header_name(header_name_as_bytes)) goto failure; header_value_as_bytes = wsgi_convert_string_to_bytes(header_value); if (!header_value_as_bytes) goto failure; PyTuple_SET_ITEM(result_tuple, 1, header_value_as_bytes); if (!wsgi_validate_header_value(header_value_as_bytes)) goto failure; } return result; failure: Py_DECREF(result); return NULL; } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_convert.h000066400000000000000000000023351452636074700203240ustar00rootroot00000000000000#ifndef WSGI_CONVERT_H #define WSGI_CONVERT_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" /* ------------------------------------------------------------------------- */ extern PyObject *wsgi_convert_string_to_bytes(PyObject *value); extern PyObject *wsgi_convert_status_line_to_bytes(PyObject *headers); extern PyObject *wsgi_convert_headers_to_bytes(PyObject *headers); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_daemon.c000066400000000000000000000024441452636074700201030ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_daemon.h" /* ------------------------------------------------------------------------- */ #if defined(MOD_WSGI_WITH_DAEMONS) int wsgi_daemon_count = 0; apr_hash_t *wsgi_daemon_index = NULL; apr_hash_t *wsgi_daemon_listeners = NULL; WSGIDaemonProcess *wsgi_daemon_process = NULL; int volatile wsgi_request_count = 0; WSGIDaemonThread *wsgi_worker_threads = NULL; WSGIThreadStack *wsgi_worker_stack = NULL; #endif /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_daemon.h000066400000000000000000000110561452636074700201070ustar00rootroot00000000000000#ifndef WSGI_DAEMON_H #define WSGI_DAEMON_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ #ifndef WIN32 #if APR_HAS_OTHER_CHILD && APR_HAS_THREADS && APR_HAS_FORK #define MOD_WSGI_WITH_DAEMONS 1 #endif #endif /* * Apache 2.X and UNIX specific definitions related to * distinct daemon processes. */ #if defined(MOD_WSGI_WITH_DAEMONS) #include "unixd.h" #include "scoreboard.h" #include "mpm_common.h" #include "apr_proc_mutex.h" #include "apr_thread_cond.h" #include "apr_atomic.h" #include "http_connection.h" #include "apr_poll.h" #include "apr_signal.h" #include "http_vhost.h" #if APR_MAJOR_VERSION < 2 #include "apr_support.h" #endif #if APR_MAJOR_VERSION < 1 #define apr_atomic_cas32 apr_atomic_cas #endif #if APR_HAVE_SYS_SOCKET_H #include #endif #if APR_HAVE_UNISTD_H #include #endif #if APR_HAVE_SYS_TYPES_H #include #endif #ifdef HAVE_SYS_SEM_H #include #endif #include #include #ifndef WSGI_LISTEN_BACKLOG #define WSGI_LISTEN_BACKLOG 100 #endif #define WSGI_STACK_HEAD 0xffff #define WSGI_STACK_LAST 0xffff #define WSGI_STACK_TERMINATED 0x10000 #define WSGI_STACK_NO_LISTENER 0x20000 typedef struct { server_rec *server; long random; int id; const char *name; const char *user; uid_t uid; const char *group; gid_t gid; const char *groups_list; int groups_count; gid_t *groups; int processes; int multiprocess; int threads; long umask; const char *root; const char *home; const char *lang; const char *locale; const char *python_home; const char *python_path; const char *python_eggs; int stack_size; int maximum_requests; int shutdown_timeout; apr_time_t startup_timeout; apr_time_t deadlock_timeout; apr_time_t inactivity_timeout; apr_time_t request_timeout; apr_time_t graceful_timeout; apr_time_t eviction_timeout; apr_time_t restart_interval; apr_time_t connect_timeout; apr_time_t socket_timeout; apr_time_t queue_timeout; const char *socket_user; int listen_backlog; const char *display_name; int send_buffer_size; int recv_buffer_size; int header_buffer_size; int response_buffer_size; apr_time_t response_socket_timeout; const char *script_user; const char *script_group; int cpu_time_limit; int cpu_priority; rlim_t memory_limit; rlim_t virtual_memory_limit; const char *socket_path; int socket_rotation; int listener_fd; const char* mutex_path; apr_proc_mutex_t* mutex; int server_metrics; const char *newrelic_config_file; const char *newrelic_environment; } WSGIProcessGroup; typedef struct { WSGIProcessGroup *group; int instance; apr_proc_t process; apr_socket_t *listener; } WSGIDaemonProcess; typedef struct { int id; WSGIDaemonProcess *process; apr_thread_t *thread; int running; int next; int wakeup; apr_thread_cond_t *condition; apr_thread_mutex_t *mutex; apr_time_t request; } WSGIDaemonThread; typedef struct { apr_uint32_t state; } WSGIThreadStack; typedef struct { const char *name; const char *socket_path; apr_time_t connect_timeout; apr_time_t socket_timeout; apr_socket_t *socket; } WSGIDaemonSocket; extern int wsgi_daemon_count; extern apr_hash_t *wsgi_daemon_index; extern apr_hash_t *wsgi_daemon_listeners; extern WSGIDaemonProcess *wsgi_daemon_process; extern int volatile wsgi_request_count; extern WSGIDaemonThread *wsgi_worker_threads; extern WSGIThreadStack *wsgi_worker_stack; extern int volatile wsgi_daemon_shutdown; #endif /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ 
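/*
 * Note: the various *_timeout fields in WSGIProcessGroup above are
 * apr_time_t values, ie. microsecond intervals rather than seconds. A
 * minimal sketch of the conversion a configuration handler would
 * perform, assuming only the standard apr_time_from_sec() macro from
 * apr_time.h (the helper name is hypothetical, not an existing
 * mod_wsgi function):
 *
 *     static apr_time_t wsgi_timeout_from_seconds(int seconds)
 *     {
 *         return apr_time_from_sec(seconds);
 *     }
 */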
mod_wsgi-5.0.0/src/server/wsgi_interp.c000066400000000000000000002556011452636074700201460ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_interp.h" #include "wsgi_version.h" #include "wsgi_apache.h" #include "wsgi_server.h" #include "wsgi_logger.h" #include "wsgi_restrict.h" #include "wsgi_stream.h" #include "wsgi_metrics.h" #include "wsgi_daemon.h" #include "wsgi_metrics.h" #include "wsgi_thread.h" #if APR_HAVE_UNISTD_H #include #endif #ifndef WIN32 #include #endif /* ------------------------------------------------------------------------- */ /* Function to restrict access to use of signal(). */ static void SignalIntercept_dealloc(SignalInterceptObject *self) { Py_DECREF(self->wrapped); } static SignalInterceptObject *newSignalInterceptObject(PyObject *wrapped) { SignalInterceptObject *self = NULL; self = PyObject_New(SignalInterceptObject, &SignalIntercept_Type); if (self == NULL) return NULL; Py_INCREF(wrapped); self->wrapped = wrapped; return self; } static PyObject *SignalIntercept_call( SignalInterceptObject *self, PyObject *args, PyObject *kwds) { PyObject *h = NULL; int n = 0; PyObject *m = NULL; if (wsgi_daemon_pid != 0 && wsgi_daemon_pid != getpid()) return PyObject_Call(self->wrapped, args, kwds); if (wsgi_worker_pid != 0 && wsgi_worker_pid != getpid()) return PyObject_Call(self->wrapped, args, kwds); if (!PyArg_ParseTuple(args, "iO:signal", &n, &h)) return NULL; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server, "mod_wsgi (pid=%d): Callback registration for " "signal %d ignored.", getpid(), n); Py_END_ALLOW_THREADS m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_stack"); if (o) { PyObject *log = NULL; PyObject *args = NULL; PyObject *result = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_WARNING, NULL, 0); args = Py_BuildValue("(OOO)", Py_None, Py_None, log); result = PyObject_CallObject(o, args); Py_XDECREF(result); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } Py_XDECREF(m); Py_INCREF(h); return h; } PyTypeObject SignalIntercept_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.SignalIntercept", /*tp_name*/ sizeof(SignalInterceptObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)SignalIntercept_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ (ternaryfunc)SignalIntercept_call, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 
0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* ------------------------------------------------------------------------- */ static PyObject *wsgi_system_exit(PyObject *self, PyObject *args) { PyErr_SetObject(PyExc_SystemExit, 0); return NULL; } /* ------------------------------------------------------------------------- */ static PyMethodDef wsgi_system_exit_method[] = { { "system_exit", (PyCFunction)wsgi_system_exit, METH_VARARGS, 0 }, { NULL }, }; /* ------------------------------------------------------------------------- */ /* Wrapper around Python interpreter instances. */ const char *wsgi_python_path = NULL; const char *wsgi_python_eggs = NULL; #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 4) static void ShutdownInterpreter_dealloc(ShutdownInterpreterObject *self) { Py_DECREF(self->wrapped); } static ShutdownInterpreterObject *newShutdownInterpreterObject( PyObject *wrapped) { ShutdownInterpreterObject *self = NULL; self = PyObject_New(ShutdownInterpreterObject, &ShutdownInterpreter_Type); if (self == NULL) return NULL; Py_INCREF(wrapped); self->wrapped = wrapped; return self; } static PyObject *ShutdownInterpreter_call( ShutdownInterpreterObject *self, PyObject *args, PyObject *kwds) { PyObject *result = NULL; result = PyObject_Call(self->wrapped, args, kwds); if (result) { PyObject *module = NULL; PyObject *exitfunc = NULL; PyThreadState *tstate = PyThreadState_Get(); PyThreadState *tstate_save = tstate; PyThreadState *tstate_next = NULL; #if PY_MAJOR_VERSION >= 3 module = PyImport_ImportModule("atexit"); if (module) { PyObject *dict = NULL; dict = PyModule_GetDict(module); exitfunc = PyDict_GetItemString(dict, "_run_exitfuncs"); } else PyErr_Clear(); #else exitfunc = PySys_GetObject("exitfunc"); #endif if (exitfunc) { PyObject *res = NULL; Py_INCREF(exitfunc); PySys_SetObject("exitfunc", (PyObject *)NULL); res = PyObject_CallObject(exitfunc, (PyObject *)NULL); if (res == NULL) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; if (PyErr_ExceptionMatches(PyExc_SystemExit)) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): SystemExit exception " "raised by exit functions ignored.", getpid()); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Exception occurred within " "exit functions.", getpid()); Py_END_ALLOW_THREADS } PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *log = NULL; PyObject *args = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_ERR, NULL, 0); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyObject_CallObject(o, args); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. 
*/ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); } Py_XDECREF(res); Py_DECREF(exitfunc); } Py_XDECREF(module); /* Delete remaining thread states. */ PyThreadState_Swap(NULL); tstate = PyInterpreterState_ThreadHead(tstate->interp); while (tstate) { tstate_next = PyThreadState_Next(tstate); if (tstate != tstate_save) { PyThreadState_Swap(tstate); PyThreadState_Clear(tstate); PyThreadState_Swap(NULL); PyThreadState_Delete(tstate); } tstate = tstate_next; } tstate = tstate_save; PyThreadState_Swap(tstate); } return result; } PyTypeObject ShutdownInterpreter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.ShutdownInterpreter", /*tp_name*/ sizeof(ShutdownInterpreterObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)ShutdownInterpreter_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ (ternaryfunc)ShutdownInterpreter_call, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; #endif PyTypeObject Interpreter_Type; InterpreterObject *newInterpreterObject(const char *name) { PyInterpreterState *interp = NULL; InterpreterObject *self = NULL; PyThreadState *tstate = NULL; PyThreadState *save_tstate = NULL; PyObject *module = NULL; PyObject *object = NULL; PyObject *item = NULL; int max_threads = 0; int max_processes = 0; int is_threaded = 0; int is_forked = 0; int is_service_script = 0; const char *str = NULL; #if defined(WIN32) const char *python_home = 0; #endif /* Create handle for interpreter and local data. */ self = PyObject_New(InterpreterObject, &Interpreter_Type); if (self == NULL) return NULL; /* * If interpreter not named, then we want to bind * to the first Python interpreter instance created. * Give this interpreter an empty string as name. */ if (!name) { #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 7) interp = PyInterpreterState_Main(); #else interp = PyInterpreterState_Head(); while (PyInterpreterState_Next(interp)) interp = PyInterpreterState_Next(interp); #endif name = ""; } /* Save away the interpreter name. */ self->name = strdup(name); if (interp) { /* * Interpreter provided to us so will not be * responsible for deleting it later. This will * be the case for the main Python interpreter. */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Attach interpreter '%s'.", getpid(), name); self->interp = interp; self->owner = 0; /* Force import of threading module so that main * thread attribute of module is correctly set to * the main thread and not a secondary request * thread. */ module = PyImport_ImportModule("threading"); Py_XDECREF(module); } else { /* * Remember active thread state so can restore * it. This is actually the thread state * associated with simplified GIL state API. */ save_tstate = PyThreadState_Swap(NULL); /* * Create the interpreter. 
If creation of the * interpreter fails it will restore the * existing active thread state for us so don't * need to worry about it in that case. */ tstate = Py_NewInterpreter(); if (!tstate) { PyErr_SetString(PyExc_RuntimeError, "Py_NewInterpreter() failed"); Py_DECREF(self); return NULL; } Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Create interpreter '%s'.", getpid(), name); Py_END_ALLOW_THREADS self->interp = tstate->interp; self->owner = 1; /* * We need to replace threading._shutdown() with our own * function which will also call atexit callbacks after * threads are shutdown to cope with fact that Python * itself doesn't call the atexit callbacks in sub * interpreters. */ module = PyImport_ImportModule("threading"); #if PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 4) if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); func = PyDict_GetItemString(dict, "_shutdown"); if (func) { PyObject *wrapper = NULL; wrapper = (PyObject *)newShutdownInterpreterObject(func); PyDict_SetItemString(dict, "_shutdown", wrapper); Py_DECREF(wrapper); } } #endif Py_XDECREF(module); } /* * Install restricted objects for STDIN and STDOUT, * or log object for STDOUT as appropriate. Don't do * this if not running on Win32 and we believe we * are running in single process mode, otherwise * it prevents use of interactive debuggers such as * the 'pdb' module. */ object = newLogObject(NULL, APLOG_ERR, "", 1); PySys_SetObject("stderr", object); Py_DECREF(object); #ifndef WIN32 if (wsgi_parent_pid != getpid()) { #endif if (wsgi_server_config->restrict_stdout == 1) { object = (PyObject *)newRestrictedObject("sys.stdout"); PySys_SetObject("stdout", object); Py_DECREF(object); } else { object = newLogObject(NULL, APLOG_ERR, "", 1); PySys_SetObject("stdout", object); Py_DECREF(object); } if (wsgi_server_config->restrict_stdin == 1) { object = (PyObject *)newRestrictedObject("sys.stdin"); PySys_SetObject("stdin", object); Py_DECREF(object); } #ifndef WIN32 } #endif /* * Set sys.argv to one element list to fake out * modules that look there for Python command * line arguments as appropriate. */ object = PyList_New(0); #if PY_MAJOR_VERSION >= 3 item = PyUnicode_FromString("mod_wsgi"); #else item = PyString_FromString("mod_wsgi"); #endif PyList_Append(object, item); PySys_SetObject("argv", object); Py_DECREF(item); Py_DECREF(object); /* * Install intercept for signal handler registration * if appropriate. Don't do this though if number of * threads for daemon process was set as 0, indicating * a potential daemon process which is running a * service script. */ /* * If running in daemon mode and there are no threads * specified, must be running with service script, in * which case we register default signal handler for * SIGINT which throws a SystemExit exception. If * instead restricting signals, replace function for * registering signal handlers so they are ignored. 
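 *
 * In effect, once the intercept below is installed, a call such as
 * signal.signal(signal.SIGTERM, handler) made from application code is
 * logged and ignored: SignalIntercept_call() above returns the supplied
 * handler without registering it.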
*/ #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process && wsgi_daemon_process->group->threads == 0) { is_service_script = 1; module = PyImport_ImportModule("signal"); if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); func = PyDict_GetItemString(dict, "signal"); if (func) { PyObject *res = NULL; PyObject *args = NULL; PyObject *callback = NULL; Py_INCREF(func); callback = PyCFunction_New(&wsgi_system_exit_method[0], NULL); args = Py_BuildValue("(iO)", SIGTERM, callback); res = PyObject_CallObject(func, args); if (!res) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Call to " "'signal.signal()' to register exit " "function failed, ignoring.", getpid()); Py_END_ALLOW_THREADS } Py_XDECREF(res); Py_XDECREF(args); Py_XDECREF(callback); Py_DECREF(func); } } Py_XDECREF(module); } #endif if (!is_service_script && wsgi_server_config->restrict_signal != 0) { module = PyImport_ImportModule("signal"); if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); func = PyDict_GetItemString(dict, "signal"); if (func) { PyObject *wrapper = NULL; wrapper = (PyObject *)newSignalInterceptObject(func); PyDict_SetItemString(dict, "signal", wrapper); Py_DECREF(wrapper); } } Py_XDECREF(module); } /* * Force loading of codecs into interpreter. This has to be * done as not otherwise done in sub interpreters and if not * done, code running in sub interpreters can fail on some * platforms if a unicode string is added in sys.path and an * import then done. */ item = PyCodec_Encoder("ascii"); Py_XDECREF(item); /* * If running in daemon process, override as appropriate * the USER, USERNAME or LOGNAME environment variables * so that they match the user that the process is running * as. Need to do this else we inherit the value from the * Apache parent process which is likely wrong as will be * root or the user than ran sudo when Apache started. * Can't update these for normal Apache child processes * as that would change the expected environment of other * Apache modules. 
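 *
 * Note that the code below only rewrites a variable when it is already
 * present in the inherited environment, so after this runs something
 * like os.environ['USER'] reports the daemon user obtained from
 * getpwuid(geteuid()) rather than the account which started Apache.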
*/ #ifndef WIN32 if (wsgi_daemon_pool) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { struct passwd *pwent; pwent = getpwuid(geteuid()); if (pwent && getenv("USER")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("USER"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("USER"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } if (pwent && getenv("USERNAME")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("USERNAME"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("USERNAME"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } if (pwent && getenv("LOGNAME")) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("LOGNAME"); value = PyUnicode_Decode(pwent->pw_name, strlen(pwent->pw_name), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("LOGNAME"); value = PyString_FromString(pwent->pw_name); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } } Py_DECREF(module); } } #endif /* * If running in daemon process, override HOME environment * variable so that is matches the home directory of the * user that the process is running as. Need to do this as * Apache will inherit HOME from root user or user that ran * sudo and started Apache and this would be wrong. Can't * update HOME for normal Apache child processes as that * would change the expected environment of other Apache * modules. */ #ifndef WIN32 if (wsgi_daemon_pool) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { struct passwd *pwent; pwent = getpwuid(geteuid()); if (pwent) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("HOME"); value = PyUnicode_Decode(pwent->pw_dir, strlen(pwent->pw_dir), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("HOME"); value = PyString_FromString(pwent->pw_dir); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } } Py_DECREF(module); } } #endif /* * Explicitly override the PYTHON_EGG_CACHE variable if it * was defined by Apache configuration. For embedded processes * this would have been done by using WSGIPythonEggs directive. * For daemon processes the 'python-eggs' option to the * WSGIDaemonProcess directive would have needed to be used. 
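 *
 * For example (directory and process group names are illustrative
 * only):
 *
 *     WSGIPythonEggs /var/cache/wsgi/python-eggs
 *     WSGIDaemonProcess example python-eggs=/var/cache/wsgi/python-eggs
 *
 * either of which results in PYTHON_EGG_CACHE being set in os.environ
 * by the code below.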
*/ if (!wsgi_daemon_pool) wsgi_python_eggs = wsgi_server_config->python_eggs; if (wsgi_python_eggs) { module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *key = NULL; PyObject *value = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("PYTHON_EGG_CACHE"); value = PyUnicode_Decode(wsgi_python_eggs, strlen(wsgi_python_eggs), Py_FileSystemDefaultEncoding, "surrogateescape"); #else key = PyString_FromString("PYTHON_EGG_CACHE"); value = PyString_FromString(wsgi_python_eggs); #endif PyObject_SetItem(object, key, value); Py_DECREF(key); Py_DECREF(value); } Py_DECREF(module); } } /* * Install user defined Python module search path. This is * added using site.addsitedir() so that any Python .pth * files are opened and additional directories so defined * are added to default Python search path as well. This * allows virtual Python environments to work. Note that * site.addsitedir() adds new directories at the end of * sys.path when they really need to be added in order at * the start. We therefore need to do a fiddle and shift * any newly added directories to the start of sys.path. */ if (!wsgi_daemon_pool) wsgi_python_path = wsgi_server_config->python_path; /* * We use a hack here on Windows to add the site-packages * directory into the Python module search path as well * as use of Python virtual environments doesn't work * otherwise if using 'python -m venv' or any released of * 'virtualenv' from 20.x onwards. */ #if defined(WIN32) python_home = wsgi_server_config->python_home; if (python_home && *python_home) { if (wsgi_python_path && *wsgi_python_path) { char delim[2]; delim[0] = DELIM; delim[1] = '\0'; wsgi_python_path = apr_pstrcat(wsgi_server->process->pool, python_home, "/Lib/site-packages", delim, wsgi_python_path, NULL); } else { wsgi_python_path = apr_pstrcat(wsgi_server->process->pool, python_home, "/Lib/site-packages", NULL); } } #endif module = PyImport_ImportModule("site"); if (wsgi_python_path && *wsgi_python_path) { PyObject *path = NULL; path = PySys_GetObject("path"); if (module && path) { PyObject *dict = NULL; PyObject *old = NULL; PyObject *new = NULL; PyObject *tmp = NULL; PyObject *item = NULL; int i = 0; old = PyList_New(0); new = PyList_New(0); tmp = PyList_New(0); for (i=0; i= 3 item = PyUnicode_DecodeFSDefaultAndSize(start, end-start); value = PyUnicode_AsUTF8(item); #else item = PyString_FromStringAndSize(start, end-start); value = PyString_AsString(item); #endif start = end+1; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), value); Py_END_ALLOW_THREADS args = Py_BuildValue("(O)", item); result = PyObject_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Call to " "'site.addsitedir()' failed for '%s', " "stopping.", getpid(), value); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_DECREF(item); Py_DECREF(args); end = strchr(start, DELIM); while (result && end) { #if PY_MAJOR_VERSION >= 3 item = PyUnicode_DecodeFSDefaultAndSize(start, end-start); value = PyUnicode_AsUTF8(item); #else item = PyString_FromStringAndSize(start, end-start); value = PyString_AsString(item); #endif start = end+1; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), value); Py_END_ALLOW_THREADS args = 
Py_BuildValue("(O)", item); result = PyObject_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): " "Call to 'site.addsitedir()' failed " "for '%s', stopping.", getpid(), value); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_DECREF(item); Py_DECREF(args); end = strchr(start, DELIM); } } #if PY_MAJOR_VERSION >= 3 item = PyUnicode_DecodeFSDefault(start); value = PyUnicode_AsUTF8(item); #else item = PyString_FromString(start); value = PyString_AsString(item); #endif Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Adding '%s' to " "path.", getpid(), value); Py_END_ALLOW_THREADS args = Py_BuildValue("(O)", item); result = PyObject_CallObject(object, args); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Call to " "'site.addsitedir()' failed for '%s'.", getpid(), start); Py_END_ALLOW_THREADS } Py_XDECREF(result); Py_XDECREF(item); Py_DECREF(args); Py_DECREF(object); } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Unable to locate " "'site.addsitedir()'.", getpid()); Py_END_ALLOW_THREADS } for (i=0; igroup->home) { PyObject *path = NULL; const char *home = wsgi_daemon_process->group->home; path = PySys_GetObject("path"); if (module && path) { PyObject *item; #if PY_MAJOR_VERSION >= 3 item = PyUnicode_Decode(home, strlen(home), Py_FileSystemDefaultEncoding, "surrogateescape"); #else item = PyString_FromString(home); #endif PyList_Insert(path, 0, item); Py_DECREF(item); } } #endif Py_XDECREF(module); /* * Create 'mod_wsgi' Python module. We first try and import an * external Python module of the same name. The intent is * that this external module would provide optional features * implementable using pure Python code. Don't want to * include them in the main Apache mod_wsgi package as that * complicates that package and also wouldn't allow them to * be released to a separate schedule. It is easier for * people to replace Python modules package with a new * version than it is to replace Apache module package. */ module = PyImport_ImportModule("mod_wsgi"); if (!module) { PyObject *modules = NULL; modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, "mod_wsgi"); if (module) { PyErr_Print(); PyDict_DelItemString(modules, "mod_wsgi"); } PyErr_Clear(); module = PyImport_AddModule("mod_wsgi"); Py_INCREF(module); } else if (!*name) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Imported 'mod_wsgi'.", getpid()); Py_END_ALLOW_THREADS } /* * Add Apache module version information to the Python * 'mod_wsgi' module. */ PyModule_AddObject(module, "version", Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, MOD_WSGI_MINORVERSION_NUMBER, MOD_WSGI_MICROVERSION_NUMBER)); /* Add type object for file wrapper. */ Py_INCREF(&Stream_Type); PyModule_AddObject(module, "FileWrapper", (PyObject *)&Stream_Type); /* * Add information about process group and application * group to the Python 'mod_wsgi' module. 
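 *
 * Application code can inspect these at runtime, for example:
 *
 *     import mod_wsgi
 *     print(mod_wsgi.process_group, mod_wsgi.application_group)
 *
 * with an empty process_group string indicating embedded mode.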
*/ #if PY_MAJOR_VERSION >= 3 PyModule_AddObject(module, "process_group", PyUnicode_DecodeLatin1(wsgi_daemon_group, strlen(wsgi_daemon_group), NULL)); PyModule_AddObject(module, "application_group", PyUnicode_DecodeLatin1(name, strlen(name), NULL)); #else PyModule_AddObject(module, "process_group", PyString_FromString(wsgi_daemon_group)); PyModule_AddObject(module, "application_group", PyString_FromString(name)); #endif /* * Add information about number of processes and threads * available to the WSGI application to the 'mod_wsgi' module. * When running in embedded mode, this will be the same as * what the 'apache' module records for Apache itself. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { object = PyLong_FromLong(wsgi_daemon_process->group->processes); PyModule_AddObject(module, "maximum_processes", object); object = PyLong_FromLong(wsgi_daemon_process->group->threads); PyModule_AddObject(module, "threads_per_process", object); } else { ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads); } ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked); if (is_forked != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_processes); if (max_processes == -1) { ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_processes); } } max_threads = (max_threads <= 0) ? 1 : max_threads; max_processes = (max_processes <= 0) ? 1 : max_processes; object = PyLong_FromLong(max_processes); PyModule_AddObject(module, "maximum_processes", object); object = PyLong_FromLong(max_threads); PyModule_AddObject(module, "threads_per_process", object); } #else ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads); } ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked); if (is_forked != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_processes); if (max_processes == -1) { ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_processes); } } max_threads = (max_threads <= 0) ? 1 : max_threads; max_processes = (max_processes <= 0) ? 1 : max_processes; object = PyLong_FromLong(max_processes); PyModule_AddObject(module, "maximum_processes", object); object = PyLong_FromLong(max_threads); PyModule_AddObject(module, "threads_per_process", object); #endif PyModule_AddObject(module, "server_metrics", PyCFunction_New( &wsgi_server_metrics_method[0], NULL)); PyModule_AddObject(module, "process_metrics", PyCFunction_New( &wsgi_process_metrics_method[0], NULL)); PyModule_AddObject(module, "request_metrics", PyCFunction_New( &wsgi_request_metrics_method[0], NULL)); PyModule_AddObject(module, "subscribe_events", PyCFunction_New( &wsgi_subscribe_events_method[0], NULL)); PyModule_AddObject(module, "subscribe_shutdown", PyCFunction_New( &wsgi_subscribe_shutdown_method[0], NULL)); PyModule_AddObject(module, "event_callbacks", PyList_New(0)); PyModule_AddObject(module, "shutdown_callbacks", PyList_New(0)); PyModule_AddObject(module, "active_requests", PyDict_New()); PyModule_AddObject(module, "request_data", PyCFunction_New( &wsgi_request_data_method[0], NULL)); /* Done with the 'mod_wsgi' module. */ Py_DECREF(module); /* * Create 'apache' Python module. If this is not a daemon * process and it is the first interpreter created by * Python, we first try and import an external Python module * of the same name. The intent is that this external module * would provide the SWIG bindings for the internal Apache * APIs. 
Only support use of such bindings in the first * interpreter created due to threading issues in SWIG * generated. */ module = NULL; if (!wsgi_daemon_pool) { module = PyImport_ImportModule("apache"); if (!module) { PyObject *modules = NULL; modules = PyImport_GetModuleDict(); module = PyDict_GetItemString(modules, "apache"); if (module) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Unable to import " "'apache' extension module.", getpid()); Py_END_ALLOW_THREADS PyErr_Print(); PyDict_DelItemString(modules, "apache"); module = NULL; } PyErr_Clear(); } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Imported 'apache'.", getpid()); Py_END_ALLOW_THREADS } } if (!module) { module = PyImport_AddModule("apache"); Py_INCREF(module); } /* * Add Apache version information to the Python 'apache' * module. */ PyModule_AddObject(module, "version", Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, AP_SERVER_MINORVERSION_NUMBER, AP_SERVER_PATCHLEVEL_NUMBER)); /* * Add information about the Apache MPM configuration and * the number of processes and threads available. */ ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads); } ap_mpm_query(AP_MPMQ_IS_FORKED, &is_forked); if (is_forked != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_DAEMON_USED, &max_processes); if (max_processes == -1) { ap_mpm_query(AP_MPMQ_MAX_DAEMONS, &max_processes); } } max_threads = (max_threads <= 0) ? 1 : max_threads; max_processes = (max_processes <= 0) ? 1 : max_processes; object = PyLong_FromLong(max_processes); PyModule_AddObject(module, "maximum_processes", object); object = PyLong_FromLong(max_threads); PyModule_AddObject(module, "threads_per_process", object); #if AP_MODULE_MAGIC_AT_LEAST(20051115,4) str = ap_get_server_description(); #else str = ap_get_server_version(); #endif #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); #else object = PyString_FromString(str); #endif PyModule_AddObject(module, "description", object); str = MPM_NAME; #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); #else object = PyString_FromString(str); #endif PyModule_AddObject(module, "mpm_name", object); str = ap_get_server_built(); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); #else object = PyString_FromString(str); #endif PyModule_AddObject(module, "build_date", object); /* Done with the 'apache' module. */ Py_DECREF(module); /* * If support for New Relic monitoring is enabled then * import New Relic agent module and initialise it. 
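 *
 * This is roughly equivalent to the application itself doing:
 *
 *     import newrelic.agent
 *     newrelic.agent.initialize(config_file, environment)
 *
 * using the config file and environment from the mod_wsgi
 * configuration, except that any failure here is logged and then
 * ignored.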
*/ if (!wsgi_daemon_pool) { wsgi_newrelic_config_file = wsgi_server_config->newrelic_config_file; wsgi_newrelic_environment = wsgi_server_config->newrelic_environment; } if (wsgi_newrelic_config_file) { PyObject *dict = NULL; module = PyImport_ImportModule("newrelic.agent"); if (module) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d, process='%s', application='%s'): " "Imported 'newrelic.agent'.", getpid(), wsgi_daemon_group , name); Py_END_ALLOW_THREADS dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "initialize"); if (object) { PyObject *config_file = NULL; PyObject *environment = NULL; PyObject *result = NULL; #if PY_MAJOR_VERSION >= 3 config_file = PyUnicode_Decode(wsgi_newrelic_config_file, strlen(wsgi_newrelic_config_file), Py_FileSystemDefaultEncoding, "surrogateescape"); #else config_file = PyString_FromString(wsgi_newrelic_config_file); #endif if (wsgi_newrelic_environment) { #if PY_MAJOR_VERSION >= 3 environment = PyUnicode_Decode(wsgi_newrelic_environment, strlen(wsgi_newrelic_environment), Py_FileSystemDefaultEncoding, "surrogateescape"); #else environment = PyString_FromString( wsgi_newrelic_environment); #endif } else { Py_INCREF(Py_None); environment = Py_None; } result = PyObject_CallFunctionObjArgs(object, config_file, environment, NULL); if (!result) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Unable to initialise " "New Relic agent with config '%s'.", getpid(), wsgi_newrelic_config_file); Py_END_ALLOW_THREADS } Py_DECREF(config_file); Py_DECREF(environment); Py_XDECREF(result); Py_DECREF(object); } Py_XDECREF(module); } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Unable to import " "'newrelic.agent' module.", getpid()); Py_END_ALLOW_THREADS PyErr_Print(); PyErr_Clear(); } } /* * Restore previous thread state. Only need to do * this where had to create a new interpreter. This * is basically anything except the first Python * interpreter instance. We need to restore it in * these cases as came into the function holding the * simplified GIL state for this thread but creating * the interpreter has resulted in a new thread * state object being created bound to the newly * created interpreter. In doing this though we want * to cache the thread state object which has been * created when interpreter is created. This is so * it can be reused later ensuring that thread local * data persists between requests. */ if (self->owner) { #if APR_HAS_THREADS WSGIThreadInfo *thread_handle = NULL; self->tstate_table = apr_hash_make(wsgi_server->process->pool); thread_handle = wsgi_thread_info(1, 0); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Bind thread state for " "thread %d against interpreter '%s'.", getpid(), thread_handle->thread_id, self->name); } apr_hash_set(self->tstate_table, &thread_handle->thread_id, sizeof(thread_handle->thread_id), tstate); PyThreadState_Swap(save_tstate); #else self->tstate = tstate; PyThreadState_Swap(save_tstate); #endif } return self; } static void Interpreter_dealloc(InterpreterObject *self) { PyThreadState *tstate = NULL; PyObject *module = NULL; PyThreadState *tstate_enter = NULL; #if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) PyObject *exitfunc = NULL; #endif /* * We should always enter here with the Python GIL * held and an active thread state. 
This should only * now occur when shutting down interpreter and not * when releasing interpreter as don't support * recyling of interpreters within the process. Thus * the thread state should be that for the main * Python interpreter. Where dealing with a named * sub interpreter, we need to change the thread * state to that which was originally used to create * that sub interpreter before doing anything. */ tstate_enter = PyThreadState_Get(); if (*self->name) { #if APR_HAS_THREADS WSGIThreadInfo *thread_handle = NULL; thread_handle = wsgi_thread_info(1, 0); tstate = apr_hash_get(self->tstate_table, &thread_handle->thread_id, sizeof(thread_handle->thread_id)); if (!tstate) { tstate = PyThreadState_New(self->interp); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Create thread state for " "thread %d against interpreter '%s'.", getpid(), thread_handle->thread_id, self->name); } apr_hash_set(self->tstate_table, &thread_handle->thread_id, sizeof(thread_handle->thread_id), tstate); } #else tstate = self->tstate; #endif /* * Swap to interpreter thread state that was used when * the sub interpreter was created. */ PyThreadState_Swap(tstate); } /* Now destroy the sub interpreter. */ if (self->owner) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Destroy interpreter '%s'.", getpid(), self->name); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Cleanup interpreter '%s'.", getpid(), self->name); Py_END_ALLOW_THREADS } /* * Because the thread state we are using was created outside * of any Python code and is not the same as the Python main * thread, there is no record of it within the 'threading' * module. We thus need to access current thread function of * the 'threading' module to force it to create a thread * handle for the thread. If we do not do this, then the * 'threading' modules exit function will always fail * because it will not be able to find a handle for this * thread. */ module = PyImport_ImportModule("threading"); if (!module) PyErr_Clear(); if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); #if PY_MAJOR_VERSION >= 3 func = PyDict_GetItemString(dict, "current_thread"); #else func = PyDict_GetItemString(dict, "currentThread"); #endif if (func) { PyObject *res = NULL; Py_INCREF(func); res = PyObject_CallObject(func, (PyObject *)NULL); if (!res) { PyErr_Clear(); } Py_XDECREF(res); Py_DECREF(func); } } /* * In Python 2.5.1 an exit function is no longer used to * shutdown and wait on non daemon threads which were created * from Python code. Instead, in Py_Main() it explicitly * calls 'threading._shutdown()'. Thus need to emulate this * behaviour for those versions. 
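 *
 * The emulation below amounts to calling:
 *
 *     import threading
 *     threading._shutdown()
 *
 * with any exception it raises being logged rather than propagated.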
*/ #if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) if (module) { PyObject *dict = NULL; PyObject *func = NULL; dict = PyModule_GetDict(module); func = PyDict_GetItemString(dict, "_shutdown"); if (func) { PyObject *res = NULL; Py_INCREF(func); res = PyObject_CallObject(func, (PyObject *)NULL); if (res == NULL) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Exception occurred within " "threading._shutdown().", getpid()); Py_END_ALLOW_THREADS PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *log = NULL; PyObject *args = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_ERR, NULL, 0); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyObject_CallObject(o, args); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. */ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); } Py_XDECREF(res); Py_DECREF(func); } } /* Finally done with 'threading' module. */ Py_XDECREF(module); /* * Invoke exit functions by calling sys.exitfunc() for * Python 2.X and atexit._run_exitfuncs() for Python 3.X. * Note that in Python 3.X we can't call this on main Python * interpreter as for Python 3.X it doesn't deregister * functions as called, so have no choice but to rely on * Py_Finalize() to do it for the main interpreter. Now * that simplified GIL state API usage sorted out, this * should be okay. 
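 *
 * Under Python 3.X, for a sub interpreter we own, this amounts to
 * explicitly running:
 *
 *     import atexit
 *     atexit._run_exitfuncs()
 *
 * since, as noted when the interpreter was created, Python does not
 * run atexit callbacks for sub interpreters itself.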
*/ module = NULL; #if PY_MAJOR_VERSION >= 3 if (self->owner) { module = PyImport_ImportModule("atexit"); if (module) { PyObject *dict = NULL; dict = PyModule_GetDict(module); exitfunc = PyDict_GetItemString(dict, "_run_exitfuncs"); } else PyErr_Clear(); } #else exitfunc = PySys_GetObject("exitfunc"); #endif if (exitfunc) { PyObject *res = NULL; Py_INCREF(exitfunc); PySys_SetObject("exitfunc", (PyObject *)NULL); res = PyObject_CallObject(exitfunc, (PyObject *)NULL); if (res == NULL) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; if (PyErr_ExceptionMatches(PyExc_SystemExit)) { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): SystemExit exception " "raised by exit functions ignored.", getpid()); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Exception occurred within " "exit functions.", getpid()); Py_END_ALLOW_THREADS } PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *log = NULL; PyObject *args = NULL; Py_INCREF(o); log = newLogObject(NULL, APLOG_ERR, NULL, 0); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyObject_CallObject(o, args); Py_DECREF(args); Py_DECREF(log); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. */ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { Py_XDECREF(type); Py_XDECREF(value); Py_XDECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); } Py_XDECREF(res); Py_DECREF(exitfunc); } Py_XDECREF(module); #endif /* If we own it, we destroy it. */ if (self->owner) { /* * We need to destroy all the thread state objects * associated with the interpreter. If there are * background threads that were created then this * may well cause them to crash the next time they * try to run. Only saving grace is that we are * trying to shutdown the process. */ #if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 4) PyThreadState *tstate_save = tstate; PyThreadState *tstate_next = NULL; PyThreadState_Swap(NULL); tstate = PyInterpreterState_ThreadHead(tstate->interp); while (tstate) { tstate_next = PyThreadState_Next(tstate); if (tstate != tstate_save) { PyThreadState_Swap(tstate); PyThreadState_Clear(tstate); PyThreadState_Swap(NULL); PyThreadState_Delete(tstate); } tstate = tstate_next; } tstate = tstate_save; PyThreadState_Swap(tstate); #endif /* Can now destroy the interpreter. 
*/ Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): End interpreter '%s'.", getpid(), self->name); Py_END_ALLOW_THREADS Py_EndInterpreter(tstate); PyThreadState_Swap(tstate_enter); } free(self->name); PyObject_Del(self); } PyTypeObject Interpreter_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Interpreter", /*tp_name*/ sizeof(InterpreterObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Interpreter_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* * Startup and shutdown of Python interpreter. In mod_wsgi if * the Python interpreter hasn't been initialised by another * Apache module such as mod_python, we will take control and * initialise it. Need to remember that we initialised Python * and whether done in parent or child process as when done in * the parent we also take responsibility for performing special * Python fixups after Apache is forked and child process has * run. * * Note that by default we now defer initialisation of Python * until after the fork of processes as Python 3.X by design * doesn't clean up properly when it is destroyed causing * significant memory leaks into Apache parent process on an * Apache restart. Some Python 2.X versions also have real * memory leaks but not near as much. The result of deferring * initialisation is that can't benefit from copy on write * semantics for loaded data across a fork. Each process will * therefore have higher memory requirement where Python needs * to be used. */ int wsgi_python_initialized = 0; #if defined(MOD_WSGI_DISABLE_EMBEDDED) int wsgi_python_required = 0; #else int wsgi_python_required = -1; #endif int wsgi_python_after_fork = 1; void wsgi_python_version(void) { const char *compile = PY_VERSION; const char *dynamic = 0; dynamic = strtok((char *)Py_GetVersion(), " "); if (strcmp(compile, dynamic) != 0) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server, "mod_wsgi: Compiled for Python/%s.", compile); ap_log_error(APLOG_MARK, APLOG_WARNING, 0, wsgi_server, "mod_wsgi: Runtime using Python/%s.", dynamic); } } apr_status_t wsgi_python_term(void) { PyObject *module = NULL; /* Skip destruction of Python interpreter. */ if (wsgi_server_config->destroy_interpreter == 0) return APR_SUCCESS; ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Terminating Python.", getpid()); /* * We should be executing in the main thread again at this * point but without the GIL, so simply restore the original * thread state for that thread that we remembered when we * initialised the interpreter. */ PyEval_AcquireThread(wsgi_main_tstate); /* * Work around bug in Python 3.X whereby it will crash if * atexit imported into sub interpreter, but never imported * into main interpreter before calling Py_Finalize(). We * perform an import of atexit module and it as side effect * must be performing required initialisation. 
 */
*/ module = PyImport_ImportModule("atexit"); Py_XDECREF(module); /* * In Python 2.6.5 and Python 3.1.2 the shutdown of * threading was moved back into Py_Finalize() for the main * Python interpreter. Because we shutting down threading * ourselves, the second call results in errors being logged * when Py_Finalize() is called and the shutdown function * called a second time. The errors don't indicate any real * problem and the threading module ignores them anyway. * Whether we are using Python with this changed behaviour * can only be checked by looking at run time version. * Rather than try and add a dynamic check, create a fake * 'dummy_threading' module as the presence of that shuts up * the messages. It doesn't matter that the rest of the * shutdown function still runs as everything is already * stopped so doesn't do anything. */ if (!PyImport_AddModule("dummy_threading")) PyErr_Clear(); /* * Shutdown Python interpreter completely. Just to be safe * flag daemon shutdown here again and do it within a lock * which is then shared with deadlock thread used for the * daemon. This is just to avoid any risk there is a race * condition. */ #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) apr_thread_mutex_lock(wsgi_shutdown_lock); wsgi_daemon_shutdown++; #endif Py_Finalize(); #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) apr_thread_mutex_unlock(wsgi_shutdown_lock); #endif wsgi_python_initialized = 0; ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Python has shutdown.", getpid()); return APR_SUCCESS; } static apr_status_t wsgi_python_parent_cleanup(void *data) { if (wsgi_parent_pid == getpid()) { /* * Destroy Python itself including the main * interpreter. If mod_python is being loaded it * is left to mod_python to destroy Python, * although it currently doesn't do so. */ if (wsgi_python_initialized) wsgi_python_term(); } return APR_SUCCESS; } void wsgi_python_init(apr_pool_t *p) { const char *python_home = 0; int is_pyvenv = 0; /* Perform initialisation if required. */ if (!Py_IsInitialized()) { /* Enable Python 3.0 migration warnings. */ #if PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6 if (wsgi_server_config->py3k_warning_flag == 1) Py_Py3kWarningFlag++; #endif /* Disable writing of byte code files. */ #if (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 3) || \ (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 6) if (wsgi_server_config->dont_write_bytecode == 1) Py_DontWriteBytecodeFlag++; #endif /* Check for Python paths and optimisation flag. */ if (wsgi_server_config->python_optimize > 0) Py_OptimizeFlag = wsgi_server_config->python_optimize; else Py_OptimizeFlag = 0; /* Check for control options for Python warnings. */ if (wsgi_server_config->python_warnings) { apr_array_header_t *options = NULL; char **entries; int i; options = wsgi_server_config->python_warnings; entries = (char **)options->elts; for (i = 0; i < options->nelts; ++i) { #if PY_MAJOR_VERSION >= 3 wchar_t *s = NULL; int len = strlen(entries[i])+1; s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, entries[i]); #else mbstowcs(s, entries[i], len); #endif PySys_AddWarnOption(s); #else PySys_AddWarnOption(entries[i]); #endif } } #if defined(WIN32) #if defined(WIN32_PYTHON_VENV_IS_BROKEN) /* * XXX Python new style virtual environments break Python embedding * API for Python initialisation on Windows. 
So disable this code as * any attempt to call Py_SetPythonHome() with location of the * virtual environment will not work and will break initialization * of the Python interpreter. Instead manually add the directory * Lib/site-packages to the Python module search path later if * WSGIPythonHome has been set. */ /* * Check for Python home being overridden. This is only being * used on Windows. For UNIX systems we actually do a fiddle * and work out where the Python executable would be and set * its location instead. This is to get around some brokeness * in pyvenv in Python 3.X. That fiddle doesn't work on Windows * so for Windows with pyvenv, and also virtualenv 20.X and * later, we do a later fiddle where add the virtual environment * site-packages directory to the Python module search path. */ python_home = wsgi_server_config->python_home; if (python_home) { ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Python home %s.", getpid(), python_home); } if (python_home) { #if PY_MAJOR_VERSION >= 3 wchar_t *s = NULL; int len = strlen(python_home)+1; s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, python_home); #else mbstowcs(s, python_home, len); #endif Py_SetPythonHome(s); #else Py_SetPythonHome((char *)python_home); #endif } #endif #else /* * Now for the UNIX version of the code to set the Python HOME. * For this things are a mess. If using pyvenv with Python 3.3+ * then setting Python HOME doesn't work. For it we need to use * Python executable location. Everything else seems to be cool * with setting Python HOME. We therefore need to detect when we * have a pyvenv by looking for the presence of pyvenv.cfg file. * We can simply just set Python executable everywhere as that * doesn't work with brew Python on MacOS X. */ python_home = wsgi_server_config->python_home; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process && wsgi_daemon_process->group->python_home) python_home = wsgi_daemon_process->group->python_home; #endif if (python_home) { apr_status_t rv; apr_finfo_t finfo; char *pyvenv_cfg; const char *python_exe = 0; #if PY_MAJOR_VERSION >= 3 wchar_t *s = NULL; int len = 0; #endif /* * Is common to see people set the directory to an incorrect * location, including to a location within an inaccessible * user home directory, or to the 'python' executable itself. * Try and validate that the location is accessible and is a * directory. */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Python home %s.", getpid(), python_home); #if !defined(WIN32) rv = apr_stat(&finfo, python_home, APR_FINFO_NORM, p); if (rv != APR_SUCCESS) { ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server, "mod_wsgi (pid=%d): Unable to stat Python home " "%s. Python interpreter may not be able to be " "initialized correctly. Verify the supplied path " "and access permissions for whole of the path.", getpid(), python_home); } else { if (finfo.filetype != APR_DIR) { ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server, "mod_wsgi (pid=%d): Python home %s is not " "a directory. Python interpreter may not " "be able to be initialized correctly. " "Verify the supplied path.", getpid(), python_home); } else if (access(python_home, X_OK) == -1) { ap_log_error(APLOG_MARK, APLOG_WARNING, rv, wsgi_server, "mod_wsgi (pid=%d): Python home %s is not " "accessible. Python interpreter may not " "be able to be initialized correctly. 
" "Verify the supplied path and access " "permissions on the directory.", getpid(), python_home); } } #endif /* Now detect whether have a pyvenv with Python 3.3+. */ pyvenv_cfg = apr_pstrcat(p, python_home, "/pyvenv.cfg", NULL); #if defined(WIN32) if (access(pyvenv_cfg, 0) == 0) is_pyvenv = 1; #else if (access(pyvenv_cfg, R_OK) == 0) is_pyvenv = 1; #endif if (is_pyvenv) { /* * Embedded support for pyvenv is broken so need to * set Python executable location and cannot set the * Python HOME as is more desirable. */ python_exe = apr_pstrcat(p, python_home, "/bin/python", NULL); #if PY_MAJOR_VERSION >= 3 len = strlen(python_exe)+1; s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, python_exe); #else mbstowcs(s, python_exe, len); #endif Py_SetProgramName(s); #else Py_SetProgramName((char *)python_exe); #endif } else { #if PY_MAJOR_VERSION >= 3 len = strlen(python_home)+1; s = (wchar_t *)apr_palloc(p, len*sizeof(wchar_t)); #if defined(WIN32) && defined(APR_HAS_UNICODE_FS) wsgi_utf8_to_unicode_path(s, len, python_home); #else mbstowcs(s, python_home, len); #endif Py_SetPythonHome(s); #else Py_SetPythonHome((char *)python_home); #endif } } #endif /* * Set environment variable PYTHONHASHSEED. We need to * make sure we remove the environment variable later * so that it doesn't remain in the process environment * and be inherited by execd sub processes. */ if (wsgi_server_config->python_hash_seed != NULL) { char *envvar = apr_pstrcat(p, "PYTHONHASHSEED=", wsgi_server_config->python_hash_seed, NULL); ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Setting hash seed to %s.", getpid(), wsgi_server_config->python_hash_seed); putenv(envvar); } /* * Work around bug in Python 3.1 where it will crash * when used in non console application on Windows if * stdin/stdout have been initialised and aren't null. * Supposed to be fixed in Python 3.3. */ #if defined(WIN32) && PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 3 _wputenv(L"PYTHONIOENCODING=cp1252:backslashreplace"); #endif /* Initialise Python. */ ap_log_error(APLOG_MARK, APLOG_INFO, 0, wsgi_server, "mod_wsgi (pid=%d): Initializing Python.", getpid()); Py_Initialize(); #if PY_VERSION_HEX < 0x03090000 /* Initialise threading. */ PyEval_InitThreads(); #endif /* * Remove the environment variable we set for the hash * seed. This has to be done in os.environ, which will * in turn remove it from process environ. This should * only be necessary for the main interpreter. We need * to do this before we release the GIL. */ if (wsgi_server_config->python_hash_seed != NULL) { PyObject *module = NULL; module = PyImport_ImportModule("os"); if (module) { PyObject *dict = NULL; PyObject *object = NULL; PyObject *key = NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "environ"); if (object) { #if PY_MAJOR_VERSION >= 3 key = PyUnicode_FromString("PYTHONHASHSEED"); #else key = PyString_FromString("PYTHONHASHSEED"); #endif PyObject_DelItem(object, key); Py_DECREF(key); } Py_DECREF(module); } } /* * We now want to release the GIL. Before we do that * though we remember what the current thread state is. * We will use that later to restore the main thread * state when we want to cleanup interpreters on * shutdown. */ wsgi_main_tstate = PyThreadState_Get(); PyEval_ReleaseThread(wsgi_main_tstate); wsgi_python_initialized = 1; /* * Register cleanups to be performed on parent restart * or shutdown. This will destroy Python itself. 
*/ apr_pool_cleanup_register(p, NULL, wsgi_python_parent_cleanup, apr_pool_cleanup_null); } } /* * Functions for acquiring and subsequently releasing desired * Python interpreter instance. When acquiring the interpreter * a new interpreter instance will be created on demand if it * is required. The Python GIL will be held on return when the * interpreter is acquired. */ #if APR_HAS_THREADS apr_thread_mutex_t* wsgi_interp_lock = NULL; apr_thread_mutex_t* wsgi_shutdown_lock = NULL; #endif PyObject *wsgi_interpreters = NULL; apr_hash_t *wsgi_interpreters_index = NULL; InterpreterObject *wsgi_acquire_interpreter(const char *name) { PyThreadState *tstate = NULL; PyInterpreterState *interp = NULL; InterpreterObject *handle = NULL; PyGILState_STATE state; /* * In a multithreaded MPM must protect the * interpreters table. This lock is only needed to * avoid a secondary thread coming in and creating * the same interpreter if Python releases the GIL * when an interpreter is being created. */ #if APR_HAS_THREADS apr_thread_mutex_lock(wsgi_interp_lock); #endif /* * This function should never be called when the * Python GIL is held, so need to acquire it. Even * though we may need to work with a sub * interpreter, we need to acquire GIL against main * interpreter first to work with interpreter * dictionary. */ state = PyGILState_Ensure(); /* * Check if already have interpreter instance and * if not need to create one. */ handle = (InterpreterObject *)PyDict_GetItemString(wsgi_interpreters, name); if (!handle) { handle = newInterpreterObject(name); if (!handle) { ap_log_error(APLOG_MARK, APLOG_CRIT, 0, wsgi_server, "mod_wsgi (pid=%d): Cannot create interpreter '%s'.", getpid(), name); PyErr_Print(); PyErr_Clear(); PyGILState_Release(state); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif return NULL; } PyDict_SetItemString(wsgi_interpreters, name, (PyObject *)handle); /* * Add interpreter name to index kept in Apache data * strcuture as well. Make a copy of the name just in * case we have been given temporary value. */ apr_hash_set(wsgi_interpreters_index, apr_pstrdup( apr_hash_pool_get(wsgi_interpreters_index), name), APR_HASH_KEY_STRING, ""); } else Py_INCREF(handle); interp = handle->interp; /* * Create new thread state object. We should only be * getting called where no current active thread * state, so no need to remember the old one. When * working with the main Python interpreter always * use the simplified API for GIL locking so any * extension modules which use that will still work. */ PyGILState_Release(state); #if APR_HAS_THREADS apr_thread_mutex_unlock(wsgi_interp_lock); #endif if (*name) { #if APR_HAS_THREADS WSGIThreadInfo *thread_handle = NULL; thread_handle = wsgi_thread_info(1, 0); tstate = apr_hash_get(handle->tstate_table, &thread_handle->thread_id, sizeof(thread_handle->thread_id)); if (!tstate) { tstate = PyThreadState_New(interp); if (wsgi_server_config->verbose_debugging) { ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, wsgi_server, "mod_wsgi (pid=%d): Create thread state for " "thread %d against interpreter '%s'.", getpid(), thread_handle->thread_id, handle->name); } apr_hash_set(handle->tstate_table, &thread_handle->thread_id, sizeof(thread_handle->thread_id), tstate); } #else tstate = handle->tstate; #endif PyEval_AcquireThread(tstate); } else { PyGILState_Ensure(); /* * When simplified GIL state API is used, the thread * local data only persists for the extent of the top * level matching ensure/release calls. 
We want to * extend lifetime of the thread local data beyond * that, retaining it for all requests within the one * thread for the life of the process. To do that we * need to artificially increment the reference count * for the associated thread state object. */ tstate = PyThreadState_Get(); if (tstate && tstate->gilstate_counter == 1) tstate->gilstate_counter++; } return handle; } void wsgi_release_interpreter(InterpreterObject *handle) { PyThreadState *tstate = NULL; PyGILState_STATE state; /* * Need to release and destroy the thread state that * was created against the interpreter. This will * release the GIL. Note that it should be safe to * always assume that the simplified GIL state API * lock was originally unlocked as always calling in * from an Apache thread when we acquire the * interpreter in the first place. */ if (*handle->name) { tstate = PyThreadState_Get(); PyEval_ReleaseThread(tstate); } else PyGILState_Release(PyGILState_UNLOCKED); /* * Need to reacquire the Python GIL just so we can * decrement our reference count to the interpreter * itself. If the interpreter has since been removed * from the table of interpreters this will result * in its destruction if its the last reference. */ state = PyGILState_Ensure(); Py_DECREF(handle); PyGILState_Release(state); } /* ------------------------------------------------------------------------- */ void wsgi_publish_process_stopping(char *reason) { InterpreterObject *interp = NULL; apr_hash_index_t *hi; hi = apr_hash_first(NULL, wsgi_interpreters_index); while (hi) { PyObject *event = NULL; PyObject *object = NULL; const void *key; apr_hash_this(hi, &key, NULL, NULL); interp = wsgi_acquire_interpreter((char *)key); event = PyDict_New(); #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(reason, strlen(reason), NULL); #else object = PyString_FromString(reason); #endif PyDict_SetItemString(event, "shutdown_reason", object); Py_DECREF(object); wsgi_publish_event("process_stopping", event); Py_DECREF(event); wsgi_release_interpreter(interp); hi = apr_hash_next(hi); } } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_interp.h000066400000000000000000000044561452636074700201530ustar00rootroot00000000000000#ifndef WSGI_INTERP_H #define WSGI_INTERP_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ typedef struct { PyObject_HEAD PyObject *wrapped; } SignalInterceptObject; extern PyTypeObject SignalIntercept_Type; typedef struct { PyObject_HEAD PyObject *wrapped; } ShutdownInterpreterObject; extern PyTypeObject ShutdownInterpreter_Type; typedef struct { PyObject_HEAD char *name; PyInterpreterState *interp; int owner; #if APR_HAS_THREADS apr_hash_t *tstate_table; #else PyThreadState *tstate; #endif } InterpreterObject; extern PyTypeObject Interpreter_Type; extern InterpreterObject *newInterpreterObject(const char *name); extern int wsgi_python_initialized; extern int wsgi_python_after_fork; extern int wsgi_python_required; extern const char *wsgi_python_path; extern const char *wsgi_python_eggs; extern PyObject *wsgi_interpreters; extern apr_hash_t *wsgi_interpreters_index; #if APR_HAS_THREADS extern apr_thread_mutex_t *wsgi_interp_lock; extern apr_thread_mutex_t* wsgi_shutdown_lock; #endif extern void wsgi_python_version(void); extern void wsgi_python_init(apr_pool_t *p); extern apr_status_t wsgi_python_term(void); extern InterpreterObject *wsgi_acquire_interpreter(const char *name); extern void wsgi_release_interpreter(InterpreterObject *handle); extern void wsgi_publish_process_stopping(char *reason); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_logger.c000066400000000000000000000444101452636074700201160ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_logger.h" #include "wsgi_server.h" #include "wsgi_metrics.h" #include "wsgi_thread.h" /* ------------------------------------------------------------------------- */ typedef struct { PyObject_HEAD const char *name; int proxy; request_rec *r; int level; char *s; long l; int expired; #if PY_MAJOR_VERSION < 3 long softspace; #endif } LogObject; PyTypeObject Log_Type; PyObject *newLogBufferObject(request_rec *r, int level, const char *name, int proxy) { LogObject *self; self = PyObject_New(LogObject, &Log_Type); if (self == NULL) return NULL; if (!name) name = ""; self->name = name; self->proxy = proxy; self->r = r; self->level = APLOG_NOERRNO|level; self->s = NULL; self->l = 0; self->expired = 0; #if PY_MAJOR_VERSION < 3 self->softspace = 0; #endif return (PyObject *)self; } PyObject *newLogWrapperObject(PyObject *buffer) { #if PY_MAJOR_VERSION >= 3 PyObject *module = NULL; PyObject *dict = NULL; PyObject *object = NULL; PyObject *args = NULL; PyObject *wrapper = NULL; module = PyImport_ImportModule("io"); if (!module) return NULL; dict = PyModule_GetDict(module); object = PyDict_GetItemString(dict, "TextIOWrapper"); if (!object) { PyErr_SetString(PyExc_NameError, "name 'TextIOWrapper' is not defined"); return NULL; } Py_INCREF(object); args = Py_BuildValue("(OssOOO)", buffer, "utf-8", "replace", Py_None, Py_True, Py_True); wrapper = PyObject_CallObject(object, args); Py_DECREF(args); Py_DECREF(object); return (PyObject *)wrapper; #else Py_INCREF(buffer); return (PyObject *)buffer; #endif } PyObject *newLogObject(request_rec *r, int level, const char *name, int proxy) { PyObject *buffer = NULL; PyObject *wrapper = NULL; buffer = newLogBufferObject(r, level, name, proxy); if (!buffer) return NULL; wrapper = newLogWrapperObject(buffer); Py_DECREF(buffer); return wrapper; } static void Log_call(LogObject *self, const char *s, long l) { /* * The length of the string to be logged is ignored * for now. We just pass the whole string to the * Apache error log functions. It will actually * truncate it at some value less than 8192 * characters depending on the length of the prefix * to go at the front. If there are embedded NULLs * then truncation will occur at that point. That * truncation occurs like this is also what happens * if using FASTCGI solutions for Apache, so not * doing anything different here. 
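     *
     * Note also that Log_call() is only ever handed a single line at a
     * time, with any trailing newline already removed. The splitting of
     * arbitrary writes on newlines is done by Log_queue() further below,
     * which also buffers any trailing partial line until it is completed
     * by a later write, or is pushed out when the log object is flushed
     * or closed.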
*/ if (self->r) { Py_BEGIN_ALLOW_THREADS ap_log_rerror(APLOG_MARK, self->level, 0, self->r, "%s", s); Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS ap_log_error(APLOG_MARK, self->level, 0, wsgi_server, "%s", s); Py_END_ALLOW_THREADS } } static void Log_dealloc(LogObject *self) { if (self->s) { if (!self->expired) Log_call(self, self->s, self->l); free(self->s); } PyObject_Del(self); } static PyObject *Log_flush(LogObject *self, PyObject *args) { WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_flush((LogObject *)thread_info->log_buffer, args); if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (self->s) { Log_call(self, self->s, self->l); free(self->s); self->s = NULL; self->l = 0; } Py_INCREF(Py_None); return Py_None; } static PyObject *Log_close(LogObject *self, PyObject *args) { PyObject *result = NULL; WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_close((LogObject *)thread_info->log_buffer, args); if (!self->expired) result = Log_flush(self, args); Py_XDECREF(result); self->r = NULL; self->expired = 1; Py_INCREF(Py_None); return Py_None; } static PyObject *Log_isatty(LogObject *self, PyObject *args) { Py_INCREF(Py_False); return Py_False; } static void Log_queue(LogObject *self, const char *msg, Py_ssize_t len) { const char *p = NULL; const char *q = NULL; const char *e = NULL; p = msg; e = p + len; /* * Break string on newline. This is on assumption * that primarily textual information being logged. */ q = p; while (q != e) { if (*q == '\n') break; q++; } while (q != e) { /* Output each complete line. */ if (self->s) { /* Need to join with buffered value. */ long m = 0; long n = 0; char *s = NULL; m = self->l; n = m+q-p+1; s = (char *)malloc(n); memcpy(s, self->s, m); memcpy(s+m, p, q-p); s[n-1] = '\0'; free(self->s); self->s = NULL; self->l = 0; Log_call(self, s, n-1); free(s); } else { long n = 0; char *s = NULL; n = q-p+1; s = (char *)malloc(n); memcpy(s, p, q-p); s[n-1] = '\0'; Log_call(self, s, n-1); free(s); } p = q+1; /* Break string on newline. */ q = p; while (q != e) { if (*q == '\n') break; q++; } } if (p != e) { /* Save away incomplete line. */ if (self->s) { /* Need to join with buffered value. 
*/ long m = 0; long n = 0; m = self->l; n = m+e-p+1; self->s = (char *)realloc(self->s, n); memcpy(self->s+m, p, e-p); self->s[n-1] = '\0'; self->l = n-1; } else { long n = 0; n = e-p+1; self->s = (char *)malloc(n); memcpy(self->s, p, n-1); self->s[n-1] = '\0'; self->l = n-1; } } } static PyObject *Log_write(LogObject *self, PyObject *args) { const char *msg = NULL; Py_ssize_t len = -1; WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_write((LogObject *)thread_info->log_buffer, args); if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "s#:write", &msg, &len)) return NULL; Log_queue(self, msg, len); Py_INCREF(Py_None); return Py_None; } static PyObject *Log_writelines(LogObject *self, PyObject *args) { PyObject *sequence = NULL; PyObject *iterator = NULL; PyObject *item = NULL; WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_writelines((LogObject *)thread_info->log_buffer, args); if (self->expired) { PyErr_SetString(PyExc_RuntimeError, "log object has expired"); return NULL; } if (!PyArg_ParseTuple(args, "O:writelines", &sequence)) return NULL; iterator = PyObject_GetIter(sequence); if (iterator == NULL) { PyErr_SetString(PyExc_TypeError, "argument must be sequence of strings"); return NULL; } while ((item = PyIter_Next(iterator))) { PyObject *result = NULL; PyObject *args = NULL; args = PyTuple_Pack(1, item); result = Log_write(self, args); Py_DECREF(args); Py_DECREF(item); if (!result) { Py_DECREF(iterator); PyErr_SetString(PyExc_TypeError, "argument must be sequence of strings"); return NULL; } } Py_DECREF(iterator); Py_INCREF(Py_None); return Py_None; } #if PY_MAJOR_VERSION >= 3 static PyObject *Log_readable(LogObject *self, PyObject *args) { Py_INCREF(Py_False); return Py_False; } static PyObject *Log_seekable(LogObject *self, PyObject *args) { Py_INCREF(Py_False); return Py_False; } static PyObject *Log_writable(LogObject *self, PyObject *args) { Py_INCREF(Py_True); return Py_True; } static PyObject *Log_fileno(LogObject *self, PyObject *args) { PyErr_SetString(PyExc_IOError, "Apache/mod_wsgi log object is not " "associated with a file descriptor."); return NULL; } #endif static PyObject *Log_name(LogObject *self, void *closure) { #if PY_MAJOR_VERSION >= 3 return PyUnicode_FromString(self->name); #else return PyString_FromString(self->name); #endif } static PyObject *Log_closed(LogObject *self, void *closure) { Py_INCREF(Py_False); return Py_False; } #if PY_MAJOR_VERSION < 3 static PyObject *Log_get_softspace(LogObject *self, void *closure) { WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_get_softspace((LogObject *)thread_info->log_buffer, closure); return PyInt_FromLong(self->softspace); } static int Log_set_softspace(LogObject *self, PyObject *value) { long new; WSGIThreadInfo *thread_info = NULL; if (self->proxy) thread_info = wsgi_thread_info(0, 0); if (thread_info && thread_info->log_buffer) return Log_set_softspace((LogObject *)thread_info->log_buffer, value); if (value == NULL) { PyErr_SetString(PyExc_TypeError, "can't delete softspace attribute"); return -1; } new = PyInt_AsLong(value); if (new == -1 && PyErr_Occurred()) return -1; self->softspace = new; return 0; } #else static PyObject 
*Log_get_encoding(LogObject *self, void *closure) { return PyUnicode_FromString("utf-8"); } static PyObject *Log_get_errors(LogObject *self, void *closure) { return PyUnicode_FromString("replace"); } #endif static PyMethodDef Log_methods[] = { { "flush", (PyCFunction)Log_flush, METH_NOARGS, 0 }, { "close", (PyCFunction)Log_close, METH_NOARGS, 0 }, { "isatty", (PyCFunction)Log_isatty, METH_NOARGS, 0 }, { "write", (PyCFunction)Log_write, METH_VARARGS, 0 }, { "writelines", (PyCFunction)Log_writelines, METH_VARARGS, 0 }, #if PY_MAJOR_VERSION >= 3 { "readable", (PyCFunction)Log_readable, METH_NOARGS, 0 }, { "seekable", (PyCFunction)Log_seekable, METH_NOARGS, 0 }, { "writable", (PyCFunction)Log_writable, METH_NOARGS, 0 }, { "fileno", (PyCFunction)Log_fileno, METH_NOARGS, 0 }, #endif { NULL, NULL} }; static PyGetSetDef Log_getset[] = { { "name", (getter)Log_name, NULL, 0 }, { "closed", (getter)Log_closed, NULL, 0 }, #if PY_MAJOR_VERSION < 3 { "softspace", (getter)Log_get_softspace, (setter)Log_set_softspace, 0 }, #else { "encoding", (getter)Log_get_encoding, NULL, 0 }, { "errors", (getter)Log_get_errors, NULL, 0 }, #endif { NULL }, }; PyTypeObject Log_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Log", /*tp_name*/ sizeof(LogObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Log_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ Log_methods, /*tp_methods*/ 0, /*tp_members*/ Log_getset, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; void wsgi_log_python_error(request_rec *r, PyObject *log, const char *filename, int publish) { PyObject *m = NULL; PyObject *result = NULL; PyObject *type = NULL; PyObject *value = NULL; PyObject *traceback = NULL; PyObject *xlog = NULL; if (!PyErr_Occurred()) return; if (!log) { PyErr_Fetch(&type, &value, &traceback); xlog = newLogObject(r, APLOG_ERR, NULL, 0); log = xlog; PyErr_Restore(type, value, traceback); type = NULL; value = NULL; traceback = NULL; } if (PyErr_ExceptionMatches(PyExc_SystemExit)) { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): SystemExit exception raised by " "WSGI script '%s' ignored.", getpid(), filename); } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): SystemExit exception raised by " "WSGI script '%s' ignored.", getpid(), filename); } Py_END_ALLOW_THREADS } else { Py_BEGIN_ALLOW_THREADS if (r) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "mod_wsgi (pid=%d): Exception occurred processing " "WSGI script '%s'.", getpid(), filename); } else { ap_log_error(APLOG_MARK, APLOG_ERR, 0, wsgi_server, "mod_wsgi (pid=%d): Exception occurred processing " "WSGI script '%s'.", getpid(), filename); } Py_END_ALLOW_THREADS } PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); if (!value) { value = Py_None; Py_INCREF(value); } if (!traceback) { traceback = Py_None; Py_INCREF(traceback); } m = PyImport_ImportModule("traceback"); if (m) { PyObject *d = NULL; PyObject *o = NULL; d = PyModule_GetDict(m); o = 
PyDict_GetItemString(d, "print_exception"); if (o) { PyObject *args = NULL; Py_INCREF(o); args = Py_BuildValue("(OOOOO)", type, value, traceback, Py_None, log); result = PyObject_CallObject(o, args); Py_DECREF(args); Py_DECREF(o); } } if (!result) { /* * If can't output exception and traceback then * use PyErr_Print to dump out details of the * exception. For SystemExit though if we do * that the process will actually be terminated * so can only clear the exception information * and keep going. */ PyErr_Restore(type, value, traceback); if (!PyErr_ExceptionMatches(PyExc_SystemExit)) { PyErr_Print(); PyErr_Clear(); } else { PyErr_Clear(); } } else { if (publish) { PyObject *event = NULL; PyObject *object = NULL; if (wsgi_event_subscribers()) { WSGIThreadInfo *thread_info; thread_info = wsgi_thread_info(0, 0); event = PyDict_New(); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) if (r->log_id) { #if PY_MAJOR_VERSION >= 3 object = PyUnicode_DecodeLatin1(r->log_id, strlen(r->log_id), NULL); #else object = PyString_FromString(r->log_id); #endif PyDict_SetItemString(event, "request_id", object); Py_DECREF(object); } #endif object = Py_BuildValue("(OOO)", type, value, traceback); PyDict_SetItemString(event, "exception_info", object); Py_DECREF(object); PyDict_SetItemString(event, "request_data", thread_info->request_data); wsgi_publish_event("request_exception", event); Py_DECREF(event); } } Py_DECREF(type); Py_DECREF(value); Py_DECREF(traceback); } Py_XDECREF(result); Py_XDECREF(m); Py_XDECREF(xlog); } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_logger.h000066400000000000000000000030001452636074700201110ustar00rootroot00000000000000#ifndef WSGI_LOGGER_H #define WSGI_LOGGER_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ extern PyTypeObject Log_Type; extern PyObject *newLogBufferObject(request_rec *r, int level, const char *name, int proxy); extern PyObject *newLogWrapperObject(PyObject *buffer); extern PyObject *newLogObject(request_rec *r, int level, const char *name, int proxy); extern void wsgi_log_python_error(request_rec *r, PyObject *log, const char *filename, int publish); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_memory.c000066400000000000000000000113421452636074700201450ustar00rootroot00000000000000/* * Author: David Robert Nadeau * Site: http://NadeauSoftware.com/ * License: Creative Commons Attribution 3.0 Unported License * http://creativecommons.org/licenses/by/3.0/deed.en_US */ #if defined(_WIN32) #include #include #define PSAPI_VERSION 1 #include #pragma comment(lib, "psapi.lib") #elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__)) #include #include #if defined(__APPLE__) && defined(__MACH__) #include #elif (defined(_AIX) || defined(__TOS__AIX__)) || (defined(__sun__) || defined(__sun) || defined(sun) && (defined(__SVR4) || defined(__svr4__))) #include #include #elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__) #include #endif #else #error "Cannot define getPeakRSS( ) or getCurrentRSS( ) for an unknown OS." #endif /** * Returns the peak (maximum so far) resident set size (physical * memory use) measured in bytes, or zero if the value cannot be * determined on this OS. */ static size_t getPeakRSS(void) { #if defined(_WIN32) /* Windows -------------------------------------------------- */ PROCESS_MEMORY_COUNTERS info; GetProcessMemoryInfo( GetCurrentProcess( ), &info, sizeof(info) ); return (size_t)info.PeakWorkingSetSize; #elif (defined(_AIX) || defined(__TOS__AIX__)) || (defined(__sun__) || defined(__sun) || defined(sun) && (defined(__SVR4) || defined(__svr4__))) /* AIX and Solaris ------------------------------------------ */ struct psinfo psinfo; int fd = -1; if ( (fd = open( "/proc/self/psinfo", O_RDONLY )) == -1 ) return (size_t)0L; /* Can't open? */ if ( read( fd, &psinfo, sizeof(psinfo) ) != sizeof(psinfo) ) { close( fd ); return (size_t)0L; /* Can't read? */ } close( fd ); return (size_t)(psinfo.pr_rssize * 1024L); #elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__)) /* BSD, Linux, and OSX -------------------------------------- */ struct rusage rusage; getrusage( RUSAGE_SELF, &rusage ); #if defined(__APPLE__) && defined(__MACH__) return (size_t)rusage.ru_maxrss; #else return (size_t)(rusage.ru_maxrss * 1024L); #endif #else /* Unknown OS ----------------------------------------------- */ return (size_t)0L; /* Unsupported. */ #endif } /** * Returns the current resident set size (physical memory use) measured * in bytes, or zero if the value cannot be determined on this OS. 
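 *
 * On Windows the value is read from GetProcessMemoryInfo(), on MacOS X
 * from task_info(), and on Linux from the second field of
 * /proc/self/statm scaled by the system page size. On platforms not
 * covered below, zero is returned.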
*/ static size_t getCurrentRSS(void) { #if defined(_WIN32) /* Windows -------------------------------------------------- */ PROCESS_MEMORY_COUNTERS info; GetProcessMemoryInfo( GetCurrentProcess( ), &info, sizeof(info) ); return (size_t)info.WorkingSetSize; #elif defined(__APPLE__) && defined(__MACH__) /* OSX ------------------------------------------------------ */ #if defined(MACH_TASK_BASIC_INFO) struct mach_task_basic_info info; mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT; if ( task_info( mach_task_self( ), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount ) != KERN_SUCCESS ) return (size_t)0L; /* Can't access? */ return (size_t)info.resident_size; #else struct task_basic_info info; mach_msg_type_number_t infoCount = TASK_BASIC_INFO_COUNT; if ( task_info( mach_task_self( ), TASK_BASIC_INFO, (task_info_t)&info, &infoCount ) != KERN_SUCCESS ) return (size_t)0L; /* Can't access? */ return (size_t)info.resident_size; #endif #elif defined(__linux__) || defined(__linux) || defined(linux) || defined(__gnu_linux__) /* Linux ---------------------------------------------------- */ long rss = 0L; FILE* fp = NULL; if ( (fp = fopen( "/proc/self/statm", "r" )) == NULL ) return (size_t)0L; /* Can't open? */ if ( fscanf( fp, "%*s%ld", &rss ) != 1 ) { fclose( fp ); return (size_t)0L; /* Can't read? */ } fclose( fp ); return (size_t)rss * (size_t)sysconf( _SC_PAGESIZE); #else /* AIX, BSD, Solaris, and Unknown OS ------------------------ */ return (size_t)0L; /* Unsupported. */ #endif } #include "wsgi_memory.h" size_t wsgi_get_peak_memory_RSS(void) { return getPeakRSS(); } size_t wsgi_get_current_memory_RSS(void) { return getCurrentRSS(); } mod_wsgi-5.0.0/src/server/wsgi_memory.h000066400000000000000000000022111452636074700201450ustar00rootroot00000000000000#ifndef WSGI_MEMORY_H #define WSGI_MEMORY_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ extern size_t wsgi_get_peak_memory_RSS(void); extern size_t wsgi_get_current_memory_RSS(void); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_metrics.c000066400000000000000000001232741452636074700203130ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_metrics.h" #include "wsgi_apache.h" #include "wsgi_daemon.h" #include "wsgi_server.h" #include "wsgi_memory.h" #include "wsgi_logger.h" #include "wsgi_thread.h" /* ------------------------------------------------------------------------- */ /* * Thread utilisation. On start and end of requests, * and when utilisation is requested, we acrue an * ongoing utilisation time value so can monitor how * busy we are handling requests. */ apr_uint64_t wsgi_total_requests = 0; int wsgi_active_requests = 0; static double wsgi_thread_utilization = 0.0; static apr_time_t wsgi_utilization_last = 0; /* Request tracking and timing. */ apr_thread_mutex_t* wsgi_monitor_lock = NULL; static double wsgi_utilization_time(int adjustment, apr_uint64_t* request_count) { apr_time_t now; double utilization = wsgi_thread_utilization; apr_thread_mutex_lock(wsgi_monitor_lock); now = apr_time_now(); if (wsgi_utilization_last != 0.0) { utilization = (now - wsgi_utilization_last) / 1000000.0; if (utilization < 0) utilization = 0; utilization = wsgi_active_requests * utilization; wsgi_thread_utilization += utilization; utilization = wsgi_thread_utilization; } wsgi_utilization_last = now; wsgi_active_requests += adjustment; if (adjustment < 0) wsgi_total_requests += -adjustment; if (request_count) *request_count = wsgi_total_requests; apr_thread_mutex_unlock(wsgi_monitor_lock); return utilization; } static int wsgi_request_metrics_enabled = 0; static apr_uint64_t wsgi_sample_requests = 0; static double wsgi_server_time_total = 0; static int wsgi_server_time_buckets[16]; static double wsgi_queue_time_total = 0; static int wsgi_queue_time_buckets[16]; static double wsgi_daemon_time_total = 0; static int wsgi_daemon_time_buckets[16]; static double wsgi_application_time_total = 0; static int wsgi_application_time_buckets[16]; static int* wsgi_request_threads_buckets = NULL; void wsgi_record_time_in_buckets(int* buckets, double duration) { int index = 0; double threshold = 0.005; while (index < 14) { if (duration <= threshold) { buckets[index] += 1; return; } threshold *= 2; index += 1; } buckets[index] += 1; } void wsgi_record_request_times(apr_time_t request_start, apr_time_t queue_start, apr_time_t daemon_start, apr_time_t application_start, apr_time_t application_finish) { double server_time = 0.0; double queue_time = 0.0; double daemon_time = 0.0; double application_time = 0.0; if (wsgi_request_metrics_enabled == 0) return; if (queue_start) { server_time = apr_time_sec((double)(queue_start-request_start)); queue_time = apr_time_sec((double)(daemon_start-queue_start)); daemon_time = apr_time_sec((double)(application_start-daemon_start)); } else { server_time = apr_time_sec((double)(application_start-request_start)); daemon_time = 0; queue_time = 0; } application_time = (apr_time_sec((double)(application_finish- application_start))); apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_sample_requests += 1; wsgi_server_time_total += server_time; wsgi_queue_time_total += queue_time; wsgi_daemon_time_total += daemon_time; 
wsgi_application_time_total += application_time; wsgi_record_time_in_buckets(&wsgi_server_time_buckets[0], server_time); #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { wsgi_record_time_in_buckets(&wsgi_queue_time_buckets[0], queue_time); wsgi_record_time_in_buckets(&wsgi_daemon_time_buckets[0], daemon_time); } #endif wsgi_record_time_in_buckets(&wsgi_application_time_buckets[0], application_time); apr_thread_mutex_unlock(wsgi_monitor_lock); } WSGIThreadInfo *wsgi_start_request(request_rec *r) { WSGIThreadInfo *thread_info; PyObject *module = NULL; thread_info = wsgi_thread_info(1, 1); thread_info->request_data = PyDict_New(); #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) #if PY_MAJOR_VERSION >= 3 thread_info->request_id = PyUnicode_DecodeLatin1(r->log_id, strlen(r->log_id), NULL); #else thread_info->request_id = PyString_FromString(r->log_id); #endif module = PyImport_ImportModule("mod_wsgi"); if (module) { PyObject *dict = NULL; PyObject *requests = NULL; dict = PyModule_GetDict(module); requests = PyDict_GetItemString(dict, "active_requests"); if (requests) PyDict_SetItem(requests, thread_info->request_id, thread_info->request_data); Py_DECREF(module); } else PyErr_Clear(); #endif wsgi_utilization_time(1, NULL); return thread_info; } void wsgi_end_request(void) { WSGIThreadInfo *thread_info; PyObject *module = NULL; thread_info = wsgi_thread_info(0, 1); if (thread_info) { if (wsgi_request_threads_buckets) wsgi_request_threads_buckets[thread_info->thread_id-1] += 1; #if AP_MODULE_MAGIC_AT_LEAST(20100923,2) module = PyImport_ImportModule("mod_wsgi"); if (module) { PyObject *dict = NULL; PyObject *requests = NULL; dict = PyModule_GetDict(module); requests = PyDict_GetItemString(dict, "active_requests"); PyDict_DelItem(requests, thread_info->request_id); Py_DECREF(module); } else PyErr_Clear(); #endif if (thread_info->log_buffer) Py_CLEAR(thread_info->log_buffer); if (thread_info->request_id) Py_CLEAR(thread_info->request_id); if (thread_info->request_data) Py_CLEAR(thread_info->request_data); } wsgi_utilization_time(-1, NULL); } /* ------------------------------------------------------------------------- */ static int wsgi_interns_initialized = 0; WSGI_STATIC_INTERNED_STRING(server_limit); WSGI_STATIC_INTERNED_STRING(thread_limit); WSGI_STATIC_INTERNED_STRING(running_generation); WSGI_STATIC_INTERNED_STRING(restart_time); WSGI_STATIC_INTERNED_STRING(current_time); WSGI_STATIC_INTERNED_STRING(running_time); WSGI_STATIC_INTERNED_STRING(process_num); WSGI_STATIC_INTERNED_STRING(pid); WSGI_STATIC_INTERNED_STRING(generation); WSGI_STATIC_INTERNED_STRING(quiescing); WSGI_STATIC_INTERNED_STRING(workers); WSGI_STATIC_INTERNED_STRING(thread_num); WSGI_STATIC_INTERNED_STRING(status); WSGI_STATIC_INTERNED_STRING(access_count); WSGI_STATIC_INTERNED_STRING(bytes_served); WSGI_STATIC_INTERNED_STRING(start_time); WSGI_STATIC_INTERNED_STRING(stop_time); WSGI_STATIC_INTERNED_STRING(last_used); WSGI_STATIC_INTERNED_STRING(client); WSGI_STATIC_INTERNED_STRING(request); WSGI_STATIC_INTERNED_STRING(vhost); WSGI_STATIC_INTERNED_STRING(processes); WSGI_STATIC_INTERNED_STRING(request_count); WSGI_STATIC_INTERNED_STRING(request_busy_time); WSGI_STATIC_INTERNED_STRING(memory_max_rss); WSGI_STATIC_INTERNED_STRING(memory_rss); WSGI_STATIC_INTERNED_STRING(cpu_user_time); WSGI_STATIC_INTERNED_STRING(cpu_system_time); WSGI_STATIC_INTERNED_STRING(request_threads); WSGI_STATIC_INTERNED_STRING(active_requests); WSGI_STATIC_INTERNED_STRING(threads); WSGI_STATIC_INTERNED_STRING(thread_id); 
WSGI_STATIC_INTERNED_STRING(sample_period); WSGI_STATIC_INTERNED_STRING(request_threads_maximum); WSGI_STATIC_INTERNED_STRING(request_threads_started); WSGI_STATIC_INTERNED_STRING(request_threads_active); WSGI_STATIC_INTERNED_STRING(capacity_utilization); WSGI_STATIC_INTERNED_STRING(request_throughput); WSGI_STATIC_INTERNED_STRING(server_time); WSGI_STATIC_INTERNED_STRING(queue_time); WSGI_STATIC_INTERNED_STRING(daemon_time); WSGI_STATIC_INTERNED_STRING(application_time); WSGI_STATIC_INTERNED_STRING(server_time_buckets); WSGI_STATIC_INTERNED_STRING(queue_time_buckets); WSGI_STATIC_INTERNED_STRING(daemon_time_buckets); WSGI_STATIC_INTERNED_STRING(application_time_buckets); WSGI_STATIC_INTERNED_STRING(request_threads_buckets); static PyObject *wsgi_status_flags[SERVER_NUM_STATUS]; #define WSGI_CREATE_STATUS_FLAG(name, val) \ wsgi_status_flags[name] = wsgi_PyString_InternFromString(val) static void wsgi_initialize_interned_strings(void) { /* Initialise interned strings the first time. */ if (!wsgi_interns_initialized) { WSGI_CREATE_INTERNED_STRING_ID(server_limit); WSGI_CREATE_INTERNED_STRING_ID(thread_limit); WSGI_CREATE_INTERNED_STRING_ID(running_generation); WSGI_CREATE_INTERNED_STRING_ID(restart_time); WSGI_CREATE_INTERNED_STRING_ID(current_time); WSGI_CREATE_INTERNED_STRING_ID(running_time); WSGI_CREATE_INTERNED_STRING_ID(process_num); WSGI_CREATE_INTERNED_STRING_ID(pid); WSGI_CREATE_INTERNED_STRING_ID(generation); WSGI_CREATE_INTERNED_STRING_ID(quiescing); WSGI_CREATE_INTERNED_STRING_ID(workers); WSGI_CREATE_INTERNED_STRING_ID(thread_num); WSGI_CREATE_INTERNED_STRING_ID(status); WSGI_CREATE_INTERNED_STRING_ID(access_count); WSGI_CREATE_INTERNED_STRING_ID(bytes_served); WSGI_CREATE_INTERNED_STRING_ID(start_time); WSGI_CREATE_INTERNED_STRING_ID(stop_time); WSGI_CREATE_INTERNED_STRING_ID(last_used); WSGI_CREATE_INTERNED_STRING_ID(client); WSGI_CREATE_INTERNED_STRING_ID(request); WSGI_CREATE_INTERNED_STRING_ID(vhost); WSGI_CREATE_INTERNED_STRING_ID(processes); WSGI_CREATE_INTERNED_STRING_ID(request_count); WSGI_CREATE_INTERNED_STRING_ID(request_busy_time); WSGI_CREATE_INTERNED_STRING_ID(memory_max_rss); WSGI_CREATE_INTERNED_STRING_ID(memory_rss); WSGI_CREATE_INTERNED_STRING_ID(cpu_user_time); WSGI_CREATE_INTERNED_STRING_ID(cpu_system_time); WSGI_CREATE_INTERNED_STRING_ID(request_threads); WSGI_CREATE_INTERNED_STRING_ID(active_requests); WSGI_CREATE_INTERNED_STRING_ID(threads); WSGI_CREATE_INTERNED_STRING_ID(thread_id); WSGI_CREATE_INTERNED_STRING_ID(sample_period); WSGI_CREATE_INTERNED_STRING_ID(request_threads_maximum); WSGI_CREATE_INTERNED_STRING_ID(request_threads_started); WSGI_CREATE_INTERNED_STRING_ID(request_threads_active); WSGI_CREATE_INTERNED_STRING_ID(capacity_utilization); WSGI_CREATE_INTERNED_STRING_ID(request_throughput); WSGI_CREATE_INTERNED_STRING_ID(server_time); WSGI_CREATE_INTERNED_STRING_ID(queue_time); WSGI_CREATE_INTERNED_STRING_ID(daemon_time); WSGI_CREATE_INTERNED_STRING_ID(application_time); WSGI_CREATE_INTERNED_STRING_ID(server_time_buckets); WSGI_CREATE_INTERNED_STRING_ID(daemon_time_buckets); WSGI_CREATE_INTERNED_STRING_ID(queue_time_buckets); WSGI_CREATE_INTERNED_STRING_ID(application_time_buckets); WSGI_CREATE_INTERNED_STRING_ID(request_threads_buckets); WSGI_CREATE_STATUS_FLAG(SERVER_DEAD, "."); WSGI_CREATE_STATUS_FLAG(SERVER_READY, "_"); WSGI_CREATE_STATUS_FLAG(SERVER_STARTING, "S"); WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_READ, "R"); WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_WRITE, "W"); WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_KEEPALIVE, "K"); 
WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_LOG, "L"); WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_DNS, "D"); WSGI_CREATE_STATUS_FLAG(SERVER_CLOSING, "C"); WSGI_CREATE_STATUS_FLAG(SERVER_GRACEFUL, "G"); WSGI_CREATE_STATUS_FLAG(SERVER_IDLE_KILL, "I"); wsgi_interns_initialized = 1; } } /* ------------------------------------------------------------------------- */ static PyObject *wsgi_request_metrics(void) { PyObject *result = NULL; PyObject *object = NULL; apr_time_t stop_time; double stop_request_busy_time = 0.0; apr_uint64_t stop_request_count = 0.0; double request_busy_time = 0.0; double capacity_utilization = 0.0; static double start_time = 0.0; static double start_cpu_system_time = 0.0; static double start_cpu_user_time = 0.0; static double start_request_busy_time = 0.0; static apr_uint64_t start_request_count = 0; double sample_period = 0.0; apr_uint64_t request_count = 0; double request_throughput = 0.0; double stop_cpu_system_time = 0.0; double stop_cpu_user_time = 0.0; double cpu_system_time = 0.0; double cpu_user_time = 0.0; double total_cpu_time = 0.0; static int request_threads_maximum = 0; apr_uint64_t interval_requests = 0; double server_time_total = 0; double server_time_avg = 0; double queue_time_total = 0; double queue_time_avg = 0; double daemon_time_total = 0; double daemon_time_avg = 0; double application_time_total = 0; double application_time_avg = 0; int request_threads_active = 0; int i; #ifdef HAVE_TIMES struct tms tmsbuf; static float tick = 0.0; if (!tick) { #ifdef _SC_CLK_TCK tick = sysconf(_SC_CLK_TCK); #else tick = HZ; #endif } #endif if (!wsgi_interns_initialized) wsgi_initialize_interned_strings(); if (!request_threads_maximum) { int is_threaded = 0; #if defined(MOD_WSGI_WITH_DAEMONS) if (wsgi_daemon_process) { request_threads_maximum = wsgi_daemon_process->group->threads; } else { ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &request_threads_maximum); } } #else ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &request_threads_maximum); } #endif request_threads_maximum = ((request_threads_maximum <= 0) ? 
1 : request_threads_maximum); wsgi_request_threads_buckets = (int *)apr_pcalloc( wsgi_server_config->pool, request_threads_maximum*sizeof( wsgi_request_threads_buckets[0])); } result = PyDict_New(); stop_time = apr_time_now(); stop_request_busy_time = wsgi_utilization_time(0, &stop_request_count); if (!start_time) { start_time = stop_time; start_request_busy_time = stop_request_busy_time; start_request_count = stop_request_count; #ifdef HAVE_TIMES times(&tmsbuf); start_cpu_user_time = tmsbuf.tms_utime / tick; start_cpu_system_time = tmsbuf.tms_stime / tick; #else start_cpu_user_time = 0.0; start_cpu_system_time = 0.0; #endif apr_thread_mutex_lock(wsgi_monitor_lock); wsgi_sample_requests = 0; wsgi_server_time_total = 0.0; wsgi_queue_time_total = 0.0; wsgi_daemon_time_total = 0.0; wsgi_application_time_total = 0.0; wsgi_request_metrics_enabled = 1; apr_thread_mutex_unlock(wsgi_monitor_lock); return result; } object = wsgi_PyInt_FromLong(getpid()); PyDict_SetItem(result, WSGI_INTERNED_STRING(pid), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec((double)start_time)); PyDict_SetItem(result, WSGI_INTERNED_STRING(start_time), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec((double)stop_time)); PyDict_SetItem(result, WSGI_INTERNED_STRING(stop_time), object); Py_DECREF(object); sample_period = (apr_time_sec((double)stop_time) - apr_time_sec((double)start_time)); object = PyFloat_FromDouble(sample_period); PyDict_SetItem(result, WSGI_INTERNED_STRING(sample_period), object); Py_DECREF(object); #ifdef HAVE_TIMES times(&tmsbuf); stop_cpu_user_time = tmsbuf.tms_utime / tick; stop_cpu_system_time = tmsbuf.tms_stime / tick; cpu_user_time = ((stop_cpu_user_time - start_cpu_user_time) / sample_period); cpu_system_time = ((stop_cpu_system_time - start_cpu_system_time) / sample_period); total_cpu_time += cpu_user_time; total_cpu_time += cpu_system_time; object = PyFloat_FromDouble(cpu_user_time); PyDict_SetItem(result, WSGI_INTERNED_STRING(cpu_user_time), object); Py_DECREF(object); object = PyFloat_FromDouble(cpu_system_time); PyDict_SetItem(result, WSGI_INTERNED_STRING(cpu_system_time), object); Py_DECREF(object); #else object = PyFloat_FromDouble(0.0); PyDict_SetItem(result, WSGI_INTERNED_STRING(cpu_user_time), object); Py_DECREF(object); object = PyFloat_FromDouble(0.0); PyDict_SetItem(result, WSGI_INTERNED_STRING(cpu_system_time), object); Py_DECREF(object); #endif object = wsgi_PyInt_FromLongLong(wsgi_get_peak_memory_RSS()); PyDict_SetItem(result, WSGI_INTERNED_STRING(memory_max_rss), object); Py_DECREF(object); object = wsgi_PyInt_FromLongLong(wsgi_get_current_memory_RSS()); PyDict_SetItem(result, WSGI_INTERNED_STRING(memory_rss), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(request_threads_maximum); PyDict_SetItem(result, WSGI_INTERNED_STRING(request_threads_maximum), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(wsgi_request_threads); PyDict_SetItem(result, WSGI_INTERNED_STRING(request_threads_started), object); Py_DECREF(object); request_busy_time = stop_request_busy_time - start_request_busy_time; capacity_utilization = (request_busy_time / sample_period / request_threads_maximum); object = PyFloat_FromDouble(capacity_utilization); PyDict_SetItem(result, WSGI_INTERNED_STRING(capacity_utilization), object); Py_DECREF(object); request_count = stop_request_count - start_request_count; object = wsgi_PyInt_FromLongLong(request_count); PyDict_SetItem(result, WSGI_INTERNED_STRING(request_count), object); Py_DECREF(object); request_throughput 
= sample_period ? request_count / sample_period : 0; object = PyFloat_FromDouble(request_throughput); PyDict_SetItem(result, WSGI_INTERNED_STRING(request_throughput), object); Py_DECREF(object); start_time = stop_time; start_request_busy_time = stop_request_busy_time; start_request_count = stop_request_count; start_cpu_user_time = stop_cpu_user_time; start_cpu_system_time = stop_cpu_system_time; apr_thread_mutex_lock(wsgi_monitor_lock); interval_requests = wsgi_sample_requests; server_time_total = wsgi_server_time_total; queue_time_total = wsgi_queue_time_total; daemon_time_total = wsgi_daemon_time_total; application_time_total = wsgi_application_time_total; object = PyList_New(16); for (i=0; i<16; i++) { PyList_SET_ITEM(object, i, wsgi_PyInt_FromLong( wsgi_server_time_buckets[i])); } PyDict_SetItem(result, WSGI_INTERNED_STRING(server_time_buckets), object); Py_DECREF(object); object = PyList_New(16); for (i=0; i<16; i++) { PyList_SET_ITEM(object, i, wsgi_PyInt_FromLong( wsgi_queue_time_buckets[i])); } PyDict_SetItem(result, WSGI_INTERNED_STRING(queue_time_buckets), object); Py_DECREF(object); object = PyList_New(16); for (i=0; i<16; i++) { PyList_SET_ITEM(object, i, wsgi_PyInt_FromLong( wsgi_daemon_time_buckets[i])); } PyDict_SetItem(result, WSGI_INTERNED_STRING(daemon_time_buckets), object); Py_DECREF(object); object = PyList_New(16); for (i=0; i<16; i++) { PyList_SET_ITEM(object, i, wsgi_PyInt_FromLong( wsgi_application_time_buckets[i])); } PyDict_SetItem(result, WSGI_INTERNED_STRING(application_time_buckets), object); Py_DECREF(object); object = PyList_New(request_threads_maximum); for (i=0; ielts; for (i=0; inelts; i++) { PyObject *entry = NULL; if (thread_info[i]->request_thread) { entry = PyDict_New(); object = wsgi_PyInt_FromLong(thread_info[i]->thread_id); PyDict_SetItem(entry, WSGI_INTERNED_STRING(thread_id), object); Py_DECREF(object); object = wsgi_PyInt_FromLongLong(thread_info[i]->request_count); PyDict_SetItem(entry, WSGI_INTERNED_STRING(request_count), object); Py_DECREF(object); PyList_Append(thread_list, entry); Py_DECREF(entry); } } Py_DECREF(thread_list); return result; } PyMethodDef wsgi_process_metrics_method[] = { { "process_metrics", (PyCFunction)wsgi_process_metrics, METH_NOARGS, 0 }, { NULL }, }; /* ------------------------------------------------------------------------- */ static PyObject *wsgi_server_metrics(void) { PyObject *scoreboard_dict = NULL; PyObject *process_list = NULL; PyObject *object = NULL; apr_time_t current_time; apr_interval_time_t running_time; global_score *gs_record; worker_score *ws_record; process_score *ps_record; int j, i; if (!wsgi_interns_initialized) wsgi_initialize_interned_strings(); /* Scoreboard needs to exist and server metrics enabled. */ if (!ap_exists_scoreboard_image()) { Py_INCREF(Py_None); return Py_None; } if (!wsgi_daemon_pool) { if (!wsgi_server_config->server_metrics) { Py_INCREF(Py_None); return Py_None; } } #if defined(MOD_WSGI_WITH_DAEMONS) else { if (!wsgi_daemon_process->group->server_metrics) { Py_INCREF(Py_None); return Py_None; } } #endif gs_record = ap_get_scoreboard_global(); if (!gs_record) { Py_INCREF(Py_None); return Py_None; } /* Return everything in a dictionary. Start with global. 
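     * The top level of the dictionary holds the scoreboard wide values,
     * plus a 'processes' list with one entry per scoreboard process
     * slot. Each process entry in turn holds a 'workers' list with the
     * per thread worker records. As an illustration only, and assuming
     * the method table below is registered on the mod_wsgi module in
     * the usual way, a WSGI application could consume this as:
     *
     *     import mod_wsgi
     *     metrics = mod_wsgi.server_metrics()
     *     if metrics is not None:
     *         for process in metrics['processes']:
     *             print(process['pid'], len(process['workers']))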
*/ scoreboard_dict = PyDict_New(); object = wsgi_PyInt_FromLong(gs_record->server_limit); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(server_limit), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(gs_record->thread_limit); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(thread_limit), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(gs_record->running_generation); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(running_generation), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec(( double)gs_record->restart_time)); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(restart_time), object); Py_DECREF(object); current_time = apr_time_now(); object = PyFloat_FromDouble(apr_time_sec((double)current_time)); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(current_time), object); Py_DECREF(object); running_time = (apr_uint32_t)apr_time_sec((double) current_time - ap_scoreboard_image->global->restart_time); object = wsgi_PyInt_FromLongLong(running_time); PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(running_time), object); Py_DECREF(object); /* Now add in the processes/workers. */ process_list = PyList_New(0); for (i = 0; i < gs_record->server_limit; ++i) { PyObject *process_dict = NULL; PyObject *worker_list = NULL; ps_record = ap_get_scoreboard_process(i); process_dict = PyDict_New(); PyList_Append(process_list, process_dict); object = wsgi_PyInt_FromLong(i); PyDict_SetItem(process_dict, WSGI_INTERNED_STRING(process_num), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(ps_record->pid); PyDict_SetItem(process_dict, WSGI_INTERNED_STRING(pid), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(ps_record->generation); PyDict_SetItem(process_dict, WSGI_INTERNED_STRING(generation), object); Py_DECREF(object); object = PyBool_FromLong(ps_record->quiescing); PyDict_SetItem(process_dict, WSGI_INTERNED_STRING(quiescing), object); Py_DECREF(object); worker_list = PyList_New(0); PyDict_SetItem(process_dict, WSGI_INTERNED_STRING(workers), worker_list); for (j = 0; j < gs_record->thread_limit; ++j) { PyObject *worker_dict = NULL; #if AP_MODULE_MAGIC_AT_LEAST(20071023,0) ws_record = ap_get_scoreboard_worker_from_indexes(i, j); #else ws_record = ap_get_scoreboard_worker(i, j); #endif worker_dict = PyDict_New(); PyList_Append(worker_list, worker_dict); object = wsgi_PyInt_FromLong(ws_record->thread_num); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(thread_num), object); Py_DECREF(object); object = wsgi_PyInt_FromLong(ws_record->generation); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(generation), object); Py_DECREF(object); object = wsgi_status_flags[ws_record->status]; PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(status), object); object = wsgi_PyInt_FromLong(ws_record->access_count); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(access_count), object); Py_DECREF(object); object = wsgi_PyInt_FromUnsignedLongLong(ws_record->bytes_served); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(bytes_served), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec( (double)ws_record->start_time)); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(start_time), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec( (double)ws_record->stop_time)); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(stop_time), object); Py_DECREF(object); object = PyFloat_FromDouble(apr_time_sec( (double)ws_record->last_used)); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(last_used), object); Py_DECREF(object); 
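/* The remaining worker fields are the client address, request line and virtual host strings recorded in the scoreboard. */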
object = wsgi_PyString_FromString(ws_record->client); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(client), object); Py_DECREF(object); object = wsgi_PyString_FromString(ws_record->request); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(request), object); Py_DECREF(object); object = wsgi_PyString_FromString(ws_record->vhost); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(vhost), object); Py_DECREF(object); Py_DECREF(worker_dict); } Py_DECREF(worker_list); Py_DECREF(process_dict); } PyDict_SetItem(scoreboard_dict, WSGI_INTERNED_STRING(processes), process_list); Py_DECREF(process_list); return scoreboard_dict; } /* ------------------------------------------------------------------------- */ PyMethodDef wsgi_server_metrics_method[] = { { "server_metrics", (PyCFunction)wsgi_server_metrics, METH_NOARGS, 0 }, { NULL }, }; /* ------------------------------------------------------------------------- */ static PyObject *wsgi_subscribe_events(PyObject *self, PyObject *args) { PyObject *callback = NULL; PyObject *module = NULL; if (!PyArg_ParseTuple(args, "O", &callback)) return NULL; module = PyImport_ImportModule("mod_wsgi"); if (module) { PyObject *dict = NULL; PyObject *list = NULL; dict = PyModule_GetDict(module); list = PyDict_GetItemString(dict, "event_callbacks"); if (list) PyList_Append(list, callback); else return NULL; Py_DECREF(module); } else return NULL; Py_INCREF(Py_None); return Py_None; } static PyObject *wsgi_subscribe_shutdown(PyObject *self, PyObject *args) { PyObject *callback = NULL; PyObject *module = NULL; if (!PyArg_ParseTuple(args, "O", &callback)) return NULL; module = PyImport_ImportModule("mod_wsgi"); if (module) { PyObject *dict = NULL; PyObject *list = NULL; dict = PyModule_GetDict(module); list = PyDict_GetItemString(dict, "shutdown_callbacks"); if (list) PyList_Append(list, callback); else return NULL; Py_DECREF(module); } else return NULL; Py_INCREF(Py_None); return Py_None; } long wsgi_event_subscribers(void) { PyObject *module = NULL; module = PyImport_ImportModule("mod_wsgi"); if (module) { PyObject *dict = NULL; PyObject *list = NULL; long result = 0; dict = PyModule_GetDict(module); list = PyDict_GetItemString(dict, "event_callbacks"); if (list) result = PyList_Size(list); Py_DECREF(module); return result; } else return 0; } void wsgi_call_callbacks(const char *name, PyObject *callbacks, PyObject *event) { int i; for (i=0; irequest_data) { PyErr_SetString(PyExc_RuntimeError, "no active request for thread"); return NULL; } Py_INCREF(thread_info->request_data); return thread_info->request_data; } /* ------------------------------------------------------------------------- */ PyMethodDef wsgi_request_data_method[] = { { "request_data", (PyCFunction)wsgi_request_data, METH_NOARGS, 0 }, { NULL }, }; /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_metrics.h000066400000000000000000000035761452636074700203220ustar00rootroot00000000000000#ifndef WSGI_METRICS_H #define WSGI_METRICS_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" #include "wsgi_thread.h" /* ------------------------------------------------------------------------- */ extern apr_uint64_t wsgi_total_requests; extern int wsgi_active_requests; extern apr_thread_mutex_t* wsgi_monitor_lock; extern PyMethodDef wsgi_request_metrics_method[]; extern PyMethodDef wsgi_process_metrics_method[]; extern WSGIThreadInfo *wsgi_start_request(request_rec *r); extern void wsgi_end_request(void); extern void wsgi_record_request_times(apr_time_t request_start, apr_time_t queue_start, apr_time_t daemon_start, apr_time_t application_start, apr_time_t application_finish); extern PyMethodDef wsgi_server_metrics_method[]; extern long wsgi_event_subscribers(void); extern void wsgi_publish_event(const char *name, PyObject *event); extern PyMethodDef wsgi_subscribe_events_method[]; extern PyMethodDef wsgi_subscribe_shutdown_method[]; extern PyMethodDef wsgi_request_data_method[]; /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_python.h000066400000000000000000000125331452636074700201660ustar00rootroot00000000000000#ifndef WSGI_PYTHON_H #define WSGI_PYTHON_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #define PY_SSIZE_T_CLEAN 1 #include #if !defined(PY_VERSION_HEX) #error Sorry, Python developer package does not appear to be installed. #endif #if PY_VERSION_HEX <= 0x02030000 #error Sorry, mod_wsgi requires at least Python 2.3.0 for Python 2.X. #endif #if PY_VERSION_HEX >= 0x03000000 && PY_VERSION_HEX < 0x03010000 #error Sorry, mod_wsgi requires at least Python 3.1.0 for Python 3.X. #endif #if !defined(WITH_THREAD) #error Sorry, mod_wsgi requires that Python supporting thread. 
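/* Builds of Python without thread support (WITH_THREAD) cannot be used. */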
#endif #include "structmember.h" #include "compile.h" #include "osdefs.h" #include "frameobject.h" #ifndef PyVarObject_HEAD_INIT #define PyVarObject_HEAD_INIT(type, size) \ PyObject_HEAD_INIT(type) size, #endif #ifndef Py_REFCNT #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt) #endif #ifndef Py_TYPE #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type) #endif #ifndef Py_SIZE #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size) #endif #if PY_MAJOR_VERSION >= 3 #define PyStringObject PyBytesObject #define PyString_Check PyBytes_Check #define PyString_Size PyBytes_Size #define PyString_AsString PyBytes_AsString #define PyString_FromString PyBytes_FromString #define PyString_FromStringAndSize PyBytes_FromStringAndSize #define PyString_AS_STRING PyBytes_AS_STRING #define PyString_GET_SIZE PyBytes_GET_SIZE #define _PyString_Resize _PyBytes_Resize #endif #if PY_MAJOR_VERSION < 3 #ifndef PyBytesObject #define PyBytesObject PyStringObject #define PyBytes_Type PyString_Type #define PyBytes_Check PyString_Check #define PyBytes_CheckExact PyString_CheckExact #define PyBytes_CHECK_INTERNED PyString_CHECK_INTERNED #define PyBytes_AS_STRING PyString_AS_STRING #define PyBytes_GET_SIZE PyString_GET_SIZE #define Py_TPFLAGS_BYTES_SUBCLASS Py_TPFLAGS_STRING_SUBCLASS #define PyBytes_FromStringAndSize PyString_FromStringAndSize #define PyBytes_FromString PyString_FromString #define PyBytes_FromFormatV PyString_FromFormatV #define PyBytes_FromFormat PyString_FromFormat #define PyBytes_Size PyString_Size #define PyBytes_AsString PyString_AsString #define PyBytes_Repr PyString_Repr #define PyBytes_Concat PyString_Concat #define PyBytes_ConcatAndDel PyString_ConcatAndDel #define _PyBytes_Resize _PyString_Resize #define _PyBytes_Eq _PyString_Eq #define PyBytes_Format PyString_Format #define _PyBytes_FormatLong _PyString_FormatLong #define PyBytes_DecodeEscape PyString_DecodeEscape #define _PyBytes_Join _PyString_Join #define PyBytes_AsStringAndSize PyString_AsStringAndSize #define _PyBytes_InsertThousandsGrouping _PyString_InsertThousandsGrouping #endif #endif /* ------------------------------------------------------------------------- */ #if PY_MAJOR_VERSION >= 3 #define wsgi_PyString_InternFromString(str) \ PyUnicode_InternFromString(str) #else #define wsgi_PyString_InternFromString(str) \ PyString_InternFromString(str) #endif #if PY_MAJOR_VERSION >= 3 #define wsgi_PyString_FromString(str) \ PyUnicode_DecodeLatin1(str, strlen(str), NULL) #else #define wsgi_PyString_FromString(str) \ PyString_FromString(str) #endif #ifdef HAVE_LONG_LONG #define wsgi_PyInt_FromLongLong(val) \ PyLong_FromLongLong(val) #else #if PY_MAJOR_VERSION >= 3 #define wsgi_PyInt_FromLongLong(val) \ PyLong_FromLong(val) #else #define wsgi_PyInt_FromLongLong(val) \ PyInt_FromLong(val) #endif #endif #ifdef HAVE_LONG_LONG #define wsgi_PyInt_FromUnsignedLongLong(val) \ PyLong_FromUnsignedLongLong(val) #else #if PY_MAJOR_VERSION >= 3 #define wsgi_PyInt_FromUnsignedLongLong(val) \ PyLong_FromLong(val) #else #define wsgi_PyInt_FromUnsignedLongLong(val) \ PyInt_FromLong(val) #endif #endif #if PY_MAJOR_VERSION >= 3 #define wsgi_PyInt_FromLong(val) \ PyLong_FromLong(val) #else #define wsgi_PyInt_FromLong(val) \ PyInt_FromLong(val) #endif #if PY_MAJOR_VERSION >= 3 #define wsgi_PyInt_FromUnsignedLong(val) \ PyLong_FromUnsignedLong(val) #else #define wsgi_PyInt_FromUnsignedLong(val) \ PyInt_FromUnsignedLong(val) #endif /* ------------------------------------------------------------------------- */ #define WSGI_STATIC_INTERNED_STRING(name) \ static PyObject 
*wsgi_id_##name #define WSGI_CREATE_INTERNED_STRING(name, val) \ if (wsgi_id_##name) ; else wsgi_id_##name = \ wsgi_PyString_InternFromString(val) #define WSGI_CREATE_INTERNED_STRING_ID(name) \ if (wsgi_id_##name) ; else wsgi_id_##name = \ wsgi_PyString_InternFromString(#name) #define WSGI_INTERNED_STRING(name) \ wsgi_id_##name /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_restrict.c000066400000000000000000000063141452636074700204770ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_restrict.h" /* ------------------------------------------------------------------------- */ PyTypeObject Restricted_Type; RestrictedObject *newRestrictedObject(const char *s) { RestrictedObject *self; self = PyObject_New(RestrictedObject, &Restricted_Type); if (self == NULL) return NULL; self->s = s; return self; } static void Restricted_dealloc(RestrictedObject *self) { PyObject_Del(self); } static PyObject *Restricted_getattr(RestrictedObject *self, char *name) { PyErr_Format(PyExc_IOError, "%s access restricted by mod_wsgi", self->s); return NULL; } PyTypeObject Restricted_Type = { PyVarObject_HEAD_INIT(NULL, 0) "mod_wsgi.Restricted", /*tp_name*/ sizeof(RestrictedObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Restricted_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ (getattrfunc)Restricted_getattr, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ 0, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ 0, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_restrict.h000066400000000000000000000023741452636074700205060ustar00rootroot00000000000000#ifndef WSGI_RESTRICT_H #define WSGI_RESTRICT_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" /* ------------------------------------------------------------------------- */ /* Restricted object to stop access to STDIN/STDOUT. */ typedef struct { PyObject_HEAD const char *s; } RestrictedObject; extern PyTypeObject Restricted_Type; extern RestrictedObject *newRestrictedObject(const char *s); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_server.c000066400000000000000000000067611452636074700201540ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_server.h" #include "wsgi_daemon.h" /* ------------------------------------------------------------------------- */ /* Base server object. */ server_rec *wsgi_server = NULL; apr_pool_t *wsgi_daemon_pool = NULL; const char *wsgi_daemon_group = ""; /* Process information. */ pid_t wsgi_parent_pid = 0; pid_t wsgi_worker_pid = 0; pid_t wsgi_daemon_pid = 0; apr_time_t wsgi_restart_time = 0; /* New Relic monitoring agent. */ const char *wsgi_newrelic_config_file = NULL; const char *wsgi_newrelic_environment = NULL; /* Python interpreter state. */ PyThreadState *wsgi_main_tstate = NULL; /* Configuration objects. 
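The constructors below initialise every field to NULL or -1 so unset values can be recognised later.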
*/ WSGIServerConfig *wsgi_server_config = NULL; WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p) { WSGIScriptFile *object = NULL; object = (WSGIScriptFile *)apr_pcalloc(p, sizeof(WSGIScriptFile)); object->handler_script = NULL; object->application_group = NULL; object->process_group = NULL; return object; } WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p) { WSGIServerConfig *object = NULL; object = (WSGIServerConfig *)apr_pcalloc(p, sizeof(WSGIServerConfig)); object->pool = p; object->alias_list = NULL; object->socket_prefix = NULL; #if defined(MOD_WSGI_WITH_DAEMONS) object->socket_prefix = DEFAULT_REL_RUNTIMEDIR "/wsgi"; object->socket_prefix = ap_server_root_relative(p, object->socket_prefix); #endif object->socket_rotation = 1; object->verbose_debugging = 0; object->python_warnings = NULL; object->py3k_warning_flag = -1; object->python_optimize = -1; object->dont_write_bytecode = -1; object->lang = NULL; object->locale = NULL; object->python_home = NULL; object->python_path = NULL; object->python_eggs = NULL; object->python_hash_seed = NULL; object->destroy_interpreter = -1; object->restrict_embedded = -1; object->restrict_stdin = -1; object->restrict_stdout = -1; object->restrict_signal = -1; #if defined(WIN32) || defined(DARWIN) object->case_sensitivity = 0; #else object->case_sensitivity = 1; #endif object->restrict_process = NULL; object->process_group = NULL; object->application_group = NULL; object->callable_object = NULL; object->dispatch_script = NULL; object->pass_apache_request = -1; object->pass_authorization = -1; object->script_reloading = -1; object->error_override = -1; object->chunked_request = -1; object->ignore_activity = -1; object->enable_sendfile = -1; object->server_metrics = -1; object->newrelic_config_file = NULL; object->newrelic_environment = NULL; return object; } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_server.h000066400000000000000000000065011452636074700201510ustar00rootroot00000000000000#ifndef WSGI_SERVER_H #define WSGI_SERVER_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ extern server_rec *wsgi_server; extern pid_t wsgi_parent_pid; extern pid_t wsgi_worker_pid; extern pid_t wsgi_daemon_pid; extern const char *wsgi_daemon_group; extern apr_time_t wsgi_restart_time; /* New Relic monitoring agent. */ extern const char *wsgi_newrelic_config_file; extern const char *wsgi_newrelic_environment; /* Python interpreter state. 
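Thread state handle for the main Python interpreter.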
*/ extern PyThreadState *wsgi_main_tstate; typedef struct { const char *location; const char *application; ap_regex_t *regexp; const char *process_group; const char *application_group; const char *callable_object; int pass_authorization; } WSGIAliasEntry; typedef struct { const char *handler_script; const char *process_group; const char *application_group; const char *callable_object; const char *pass_authorization; } WSGIScriptFile; typedef struct { apr_pool_t *pool; apr_array_header_t *alias_list; const char *socket_prefix; int socket_rotation; apr_lockmech_e lock_mechanism; int verbose_debugging; apr_array_header_t *python_warnings; int python_optimize; int py3k_warning_flag; int dont_write_bytecode; const char *lang; const char *locale; const char *python_home; const char *python_path; const char *python_eggs; const char *python_hash_seed; int destroy_interpreter; int restrict_embedded; int restrict_stdin; int restrict_stdout; int restrict_signal; int case_sensitivity; apr_table_t *restrict_process; const char *process_group; const char *application_group; const char *callable_object; WSGIScriptFile *dispatch_script; int pass_apache_request; int pass_authorization; int script_reloading; int error_override; int chunked_request; int map_head_to_get; int ignore_activity; apr_array_header_t *trusted_proxy_headers; apr_array_header_t *trusted_proxies; int enable_sendfile; apr_hash_t *handler_scripts; int server_metrics; const char *newrelic_config_file; const char *newrelic_environment; } WSGIServerConfig; extern WSGIServerConfig *wsgi_server_config; extern WSGIScriptFile *newWSGIScriptFile(apr_pool_t *p); extern WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p); extern apr_pool_t *wsgi_daemon_pool; /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_stream.c000066400000000000000000000154651452636074700201420ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_stream.h" /* ------------------------------------------------------------------------- */ PyTypeObject Stream_Type; static PyObject *Stream_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { StreamObject *self; self = (StreamObject *)type->tp_alloc(type, 0); if (self == NULL) return NULL; self->filelike = Py_None; Py_INCREF(self->filelike); self->blksize = 0; return (PyObject *)self; } static int Stream_init(StreamObject *self, PyObject *args, PyObject *kwds) { PyObject *filelike = NULL; apr_size_t blksize = HUGE_STRING_LEN; static char *kwlist[] = { "filelike", "blksize", NULL }; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|l:FileWrapper", kwlist, &filelike, &blksize)) { return -1; } if (filelike) { PyObject *tmp = NULL; tmp = self->filelike; Py_INCREF(filelike); self->filelike = filelike; Py_XDECREF(tmp); } self->blksize = blksize; return 0; } static void Stream_dealloc(StreamObject *self) { Py_XDECREF(self->filelike); Py_TYPE(self)->tp_free(self); } static PyObject *Stream_iter(StreamObject *self) { Py_INCREF(self); return (PyObject *)self; } static PyObject *Stream_iternext(StreamObject *self) { PyObject *attribute = NULL; PyObject *method = NULL; PyObject *args = NULL; PyObject *result = NULL; attribute = PyObject_GetAttrString((PyObject *)self, "filelike"); if (!attribute) { PyErr_SetString(PyExc_KeyError, "file wrapper no filelike attribute"); return 0; } method = PyObject_GetAttrString(attribute, "read"); if (!method) { PyErr_SetString(PyExc_KeyError, "file like object has no read() method"); Py_DECREF(attribute); return 0; } Py_DECREF(attribute); attribute = PyObject_GetAttrString((PyObject *)self, "blksize"); if (!attribute) { PyErr_SetString(PyExc_KeyError, "file wrapper has no blksize attribute"); Py_DECREF(method); return 0; } if (!PyLong_Check(attribute)) { PyErr_SetString(PyExc_KeyError, "file wrapper blksize attribute not integer"); Py_DECREF(method); Py_DECREF(attribute); return 0; } args = Py_BuildValue("(O)", attribute); result = PyObject_CallObject(method, args); Py_DECREF(args); Py_DECREF(method); Py_DECREF(attribute); if (!result) return 0; if (PyString_Check(result)) { if (PyString_Size(result) == 0) { PyErr_SetObject(PyExc_StopIteration, Py_None); Py_DECREF(result); return 0; } return result; } Py_DECREF(result); PyErr_SetString(PyExc_TypeError, "file like object yielded non string type"); return 0; } static PyObject *Stream_close(StreamObject *self, PyObject *args) { PyObject *method = NULL; PyObject *result = NULL; if (!self->filelike || self->filelike == Py_None) { Py_INCREF(Py_None); return Py_None; } method = PyObject_GetAttrString(self->filelike, "close"); if (method) { result = PyObject_CallObject(method, (PyObject *)NULL); if (!result) PyErr_Clear(); Py_DECREF(method); } Py_XDECREF(result); Py_DECREF(self->filelike); self->filelike = NULL; Py_INCREF(Py_None); return Py_None; } static PyObject *Stream_get_filelike(StreamObject *self, void *closure) { Py_INCREF(self->filelike); return self->filelike; } static PyObject *Stream_get_blksize(StreamObject *self, void *closure) { return PyLong_FromLong(self->blksize); } static PyMethodDef Stream_methods[] = { { "close", (PyCFunction)Stream_close, METH_NOARGS, 0 }, { NULL, NULL } }; static PyGetSetDef Stream_getset[] = { { "filelike", (getter)Stream_get_filelike, NULL, 0 }, { "blksize", (getter)Stream_get_blksize, NULL, 0 }, { NULL }, }; PyTypeObject Stream_Type = { PyVarObject_HEAD_INIT(NULL, 0) 
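/* mod_wsgi.FileWrapper: iterates the wrapped file-like object in blksize sized chunks. */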
"mod_wsgi.FileWrapper", /*tp_name*/ sizeof(StreamObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ /* methods */ (destructor)Stream_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ #if defined(Py_TPFLAGS_HAVE_ITER) Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ #else Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ #endif 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ (getiterfunc)Stream_iter, /*tp_iter*/ (iternextfunc)Stream_iternext, /*tp_iternext*/ Stream_methods, /*tp_methods*/ 0, /*tp_members*/ Stream_getset, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ (initproc)Stream_init, /*tp_init*/ 0, /*tp_alloc*/ Stream_new, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ }; /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_stream.h000066400000000000000000000022731452636074700201400ustar00rootroot00000000000000#ifndef WSGI_STREAM_H #define WSGI_STREAM_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ typedef struct { PyObject_HEAD PyObject *filelike; apr_size_t blksize; } StreamObject; extern PyTypeObject Stream_Type; /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_thread.c000066400000000000000000000112631452636074700201060ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_thread.h" #include "wsgi_server.h" #if defined(__APPLE__) #include #include #include #endif #if defined(linux) #include #include #endif /* ------------------------------------------------------------------------- */ int wsgi_total_threads; int wsgi_request_threads; apr_threadkey_t *wsgi_thread_key; apr_array_header_t *wsgi_thread_details; WSGIThreadInfo *wsgi_thread_info(int create, int request) { WSGIThreadInfo *thread_handle = NULL; apr_threadkey_private_get((void**)&thread_handle, wsgi_thread_key); if (!thread_handle && create) { WSGIThreadInfo **entry = NULL; if (!wsgi_thread_details) { wsgi_thread_details = apr_array_make( wsgi_server->process->pool, 3, sizeof(char*)); } thread_handle = (WSGIThreadInfo *)apr_pcalloc( wsgi_server->process->pool, sizeof(WSGIThreadInfo)); thread_handle->log_buffer = NULL; thread_handle->thread_id = wsgi_total_threads++; entry = (WSGIThreadInfo **)apr_array_push(wsgi_thread_details); *entry = thread_handle; apr_threadkey_private_set(thread_handle, wsgi_thread_key); } if (thread_handle && request && !thread_handle->request_thread) { thread_handle->request_thread = 1; wsgi_request_threads++; } return thread_handle; } /* ------------------------------------------------------------------------- */ int wsgi_thread_cpu_usage(WSGIThreadCPUUsage *usage) { #if defined(__APPLE__) mach_port_t thread; kern_return_t kr; mach_msg_type_number_t count; thread_basic_info_data_t info; usage->user_time = 0.0; usage->system_time = 0.0; thread = mach_thread_self(); count = THREAD_BASIC_INFO_COUNT; kr = thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count); mach_port_deallocate(mach_task_self(), thread); if (kr == KERN_SUCCESS && (info.flags & TH_FLAGS_IDLE) == 0) { usage->user_time = info.user_time.seconds; usage->user_time += info.user_time.microseconds / 1000000.0; usage->system_time = info.system_time.seconds; usage->system_time += info.system_time.microseconds / 1000000.0; return 1; } #elif defined(linux) && defined(RUSAGE_THREAD) struct rusage info; usage->user_time = 0.0; usage->system_time = 0.0; if (getrusage(RUSAGE_THREAD, &info) == 0) { usage->user_time = info.ru_utime.tv_sec; usage->user_time += info.ru_utime.tv_usec / 1000000.0; usage->system_time = info.ru_stime.tv_sec; usage->system_time += info.ru_stime.tv_usec / 1000000.0; return 1; } #elif defined(linux) FILE* fp; char filename[256]; char content[1024]; long tid; /* Field 14. Numbering start at 1. 
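Fields 14 (utime) and 15 (stime) of /proc/<tid>/stat hold the per thread CPU times in clock ticks.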
*/ int offset = 13; char *p; unsigned long user_time = 0; unsigned long system_time = 0; int ticks; usage->user_time = 0.0; usage->system_time = 0.0; memset(content, '\0', sizeof(content)); tid = (long)syscall(SYS_gettid); ticks = sysconf(_SC_CLK_TCK); sprintf(filename, "/proc/%ld/stat", tid); fp = fopen(filename, "r"); if (fp) { if (fread(content, 1, sizeof(content)-1, fp)) { p = content; while (*p && offset) { if (*p++ == ' ') { offset--; while (*p == ' ') p++; } } user_time = strtoul(p, &p, 10); while (*p == ' ') p++; system_time = strtoul(p, &p, 10); } fclose(fp); usage->user_time = (float)user_time / ticks; usage->system_time = (float)system_time / ticks; return 1; } #endif return 0; } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_thread.h000066400000000000000000000031301452636074700201050ustar00rootroot00000000000000#ifndef WSGI_THREAD_H #define WSGI_THREAD_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" #include "wsgi_apache.h" /* ------------------------------------------------------------------------- */ typedef struct { int thread_id; int request_thread; apr_int64_t request_count; PyObject *request_id; PyObject *request_data; PyObject *log_buffer; } WSGIThreadInfo; extern int wsgi_total_threads; extern int wsgi_request_threads; extern apr_threadkey_t *wsgi_thread_key; extern apr_array_header_t *wsgi_thread_details; extern WSGIThreadInfo *wsgi_thread_info(int create, int request); typedef struct { double user_time; double system_time; } WSGIThreadCPUUsage; extern int wsgi_thread_cpu_usage(WSGIThreadCPUUsage *usage); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_validate.c000066400000000000000000000121541452636074700204300ustar00rootroot00000000000000/* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* ------------------------------------------------------------------------- */ #include "wsgi_validate.h" #include "wsgi_convert.h" #include /* ------------------------------------------------------------------------- */ /* * A WSGI response status line consists of a status code and a reason * phrase separated by one or more space characters. The status code is * a 3 digit integer. The reason phrase is any text excluding control * characters and specifically excluding any carriage return or line * feed characters. Technically the reason phrase can be empty so long * as there still is at least a single space after the status code. */ int wsgi_validate_status_line(PyObject *value) { const char *s; if (!PyBytes_Check(value)) { PyErr_Format(PyExc_TypeError, "expected byte string object for " "status line, value of type %.200s found", value->ob_type->tp_name); return 0; } s = PyBytes_AsString(value); if (!isdigit(*s++) || !isdigit(*s++) || !isdigit(*s++)) { PyErr_SetString(PyExc_ValueError, "status code is not a 3 digit integer"); return 0; } if (isdigit(*s)) { PyErr_SetString(PyExc_ValueError, "status code is not a 3 digit integer"); return 0; } if (*s != ' ') { PyErr_SetString(PyExc_ValueError, "no space following status code"); return 0; } if (!*s) { PyErr_SetString(PyExc_ValueError, "no reason phrase supplied"); return 0; } while (*s) { if (iscntrl(*s)) { PyErr_SetString(PyExc_ValueError, "control character present in reason phrase"); return 0; } s++; } return 1; } /* ------------------------------------------------------------------------- */ /* * A WSGI header name is a token consisting of one or more characters * except control characters, the separator characters "(", ")", "<", * ">", "@", ",", ";", ":", "\", <">, "/", "[", "]", "?", "=", "{", "}" * and the space character. Only bother checking for control characters * and space characters as it is only carriage return, line feed, * leading and trailing white space that are really a problem. */ int wsgi_validate_header_name(PyObject *value) { const char *s; if (!PyBytes_Check(value)) { PyErr_Format(PyExc_TypeError, "expected byte string object for " "header name, value of type %.200s found", value->ob_type->tp_name); return 0; } s = PyBytes_AsString(value); if (!*s) { PyErr_SetString(PyExc_ValueError, "header name is empty"); return 0; } while (*s) { if (iscntrl(*s)) { PyErr_SetString(PyExc_ValueError, "control character present in header name"); return 0; } if (*s == ' ') { PyErr_SetString(PyExc_ValueError, "space character present in header name"); return 0; } s++; } return 1; } /* ------------------------------------------------------------------------- */ /* * A WSGI header value consists of any number of characters except * control characters. Only bother checking for carriage return and line * feed characters as it is not possible to trust that applications will * not use control characters. In practice the intent is that WSGI * applications shouldn't use embedded carriage return and line feed * characters to prevent attempts at line continuation which may cause * problems with some hosting mechanisms. In other words, the header * value should be all on one line. 
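* A value containing an embedded carriage return or line feed, such as a "\r\n " continuation, is therefore rejected rather than being unfolded.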
*/ int wsgi_validate_header_value(PyObject *value) { const char *s; if (!PyBytes_Check(value)) { PyErr_Format(PyExc_TypeError, "expected byte string object for " "header value, value of type %.200s found", value->ob_type->tp_name); return 0; } s = PyBytes_AsString(value); while (*s) { if (*s == '\r' || *s == '\n') { PyErr_SetString(PyExc_ValueError, "carriage return/line " "feed character present in header value"); return 0; } s++; } return 1; } /* ------------------------------------------------------------------------- */ /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_validate.h000066400000000000000000000022731452636074700204360ustar00rootroot00000000000000#ifndef WSGI_VALIDATE_H #define WSGI_VALIDATE_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ #include "wsgi_python.h" /* ------------------------------------------------------------------------- */ extern int wsgi_validate_status_line(PyObject *value); extern int wsgi_validate_header_name(PyObject *value); extern int wsgi_validate_header_value(PyObject *value); /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/src/server/wsgi_version.h000077500000000000000000000021501452636074700203270ustar00rootroot00000000000000#ifndef WSGI_VERSION_H #define WSGI_VERSION_H /* ------------------------------------------------------------------------- */ /* * Copyright 2007-2023 GRAHAM DUMPLETON * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* ------------------------------------------------------------------------- */ /* Module version information. 
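Broken out as major/minor/micro numbers plus the combined version string.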
*/ #define MOD_WSGI_MAJORVERSION_NUMBER 5 #define MOD_WSGI_MINORVERSION_NUMBER 0 #define MOD_WSGI_MICROVERSION_NUMBER 0 #define MOD_WSGI_VERSION_STRING "5.0.0" /* ------------------------------------------------------------------------- */ #endif /* vi: set sw=4 expandtab : */ mod_wsgi-5.0.0/tests/000077500000000000000000000000001452636074700145045ustar00rootroot00000000000000mod_wsgi-5.0.0/tests/access.wsgi000066400000000000000000000001121452636074700166320ustar00rootroot00000000000000def allow_access(environ, host): print(environ, host) return True mod_wsgi-5.0.0/tests/auth.wsgi000066400000000000000000000017471452636074700163510ustar00rootroot00000000000000def allow_access(environ, host): print('HOST', host, environ['REQUEST_URI']) return True def check_password(environ, user, password): print('USER', user, environ['REQUEST_URI']) if user == 'spy': if password == 'secret': return True return False elif user == 'witness': if password == 'secret': return 'protected' return False return None import hashlib def get_realm_hash(environ, user, realm): print('USER', user, environ['REQUEST_URI']) if user == 'spy': value = hashlib.md5() # user:realm:password input = '%s:%s:%s' % (user, realm, 'secret') if not isinstance(input, bytes): input = input.encode('UTF-8') value.update(input) hash = value.hexdigest() return hash return None def groups_for_user(environ, user): print('GROUP', user, environ['REQUEST_URI']) if user == 'spy': return ['secret-agents'] return [''] mod_wsgi-5.0.0/tests/environ.wsgi000066400000000000000000000103411452636074700170560ustar00rootroot00000000000000from __future__ import print_function import os import sys import locale try: from cStringIO import StringIO except ImportError: from io import StringIO import mod_wsgi import apache def application(environ, start_response): print('request message #1', file=environ['wsgi.errors']) print('global message #1') print('queued message #1', end='') print('request message #2', file=environ['wsgi.errors']) print('global message #2') print('queued message #2', end='') print('request message #3', file=environ['wsgi.errors']) print('queued message #3', '+', sep="", end='') print('queued message #4', end='') headers = [] headers.append(('Content-Type', 'text/plain; charset="UTF-8"')) write = start_response('200 OK', headers) input = environ['wsgi.input'] output = StringIO() if os.name != 'nt': print('PID: %s' % os.getpid(), file=output) print('UID: %s' % os.getuid(), file=output) print('GID: %s' % os.getgid(), file=output) print('CWD: %s' % os.getcwd(), file=output) print(file=output) print('STDOUT:', sys.stdout.name, file=output) print('STDERR:', sys.stderr.name, file=output) print('ERRORS:', environ['wsgi.errors'].name, file=output) print(file=output) print('python.version: %r' % (sys.version,), file=output) print('python.prefix: %r' % (sys.prefix,), file=output) print('python.path: %r' % (sys.path,), file=output) print(file=output) print('apache.version: %r' % (apache.version,), file=output) print('mod_wsgi.version: %r' % (mod_wsgi.version,), file=output) print(file=output) print('mod_wsgi.process_group: %s' % mod_wsgi.process_group, file=output) print('mod_wsgi.application_group: %s' % mod_wsgi.application_group, file=output) print(file=output) print('mod_wsgi.maximum_processes: %s' % mod_wsgi.maximum_processes, file=output) print('mod_wsgi.threads_per_process: %s' % mod_wsgi.threads_per_process, file=output) print('mod_wsgi.request_metrics: %s' % mod_wsgi.request_metrics(), file=output) print('mod_wsgi.process_metrics: %s' % 
mod_wsgi.process_metrics(), file=output) print('mod_wsgi.server_metrics: %s' % mod_wsgi.server_metrics(), file=output) print(file=output) metrics = mod_wsgi.server_metrics() if metrics: for process in metrics['processes']: for worker in process['workers']: print(worker['status'], file=output, end='') print(file=output) print(file=output) print('apache.description: %s' % apache.description, file=output) print('apache.build_date: %s' % apache.build_date, file=output) print('apache.mpm_name: %s' % apache.mpm_name, file=output) print('apache.maximum_processes: %s' % apache.maximum_processes, file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) print(file=output) print('PATH: %s' % os.environ.get('PATH'), file=output) print(file=output) print('LANG: %s' % os.environ.get('LANG'), file=output) print('LC_ALL: %s' % os.environ.get('LC_ALL'), file=output) print('sys.getdefaultencoding(): %s' % sys.getdefaultencoding(), file=output) print('sys.getfilesystemencoding(): %s' % sys.getfilesystemencoding(), file=output) print('locale.getlocale(): %s' % (locale.getlocale(),), file=output) print('locale.getdefaultlocale(): %s' % (locale.getdefaultlocale(),), file=output) print('locale.getpreferredencoding(): %s' % locale.getpreferredencoding(), file=output) print(file=output) keys = sorted(environ.keys()) for key in keys: print('%s: %s' % (key, repr(environ[key])), file=output) print(file=output) keys = sorted(os.environ.keys()) for key in keys: print('%s: %s' % (key, repr(os.environ[key])), file=output) print(file=output) result = output.getvalue() if not isinstance(result, bytes): result = result.encode('UTF-8') yield result block_size = 8192 data = input.read(block_size) while data: yield data data = input.read(block_size) mod_wsgi-5.0.0/tests/events.wsgi000066400000000000000000000047121452636074700167070ustar00rootroot00000000000000from __future__ import print_function import mod_wsgi import traceback import time import os import threading import atexit try: mod_wsgi.request_data() except RuntimeError: print('INACTIVE') def wrapper(application): def _application(environ, start_response): print('WRAPPER', application) return application(environ, start_response) return _application def event_handler(name, **kwargs): print('EVENT-HANDLER', name, kwargs, os.getpid(), mod_wsgi.application_group) if name == 'request_started': thread = threading.current_thread() request_data = kwargs['request_data'] request_data['thread_name'] = thread.name request_data['thread_id'] = thread.ident return dict(application_object=wrapper(kwargs['application_object'])) elif name == 'response_started': print('REQUESTS', mod_wsgi.active_requests) elif name == 'request_finished': print('PROCESS', mod_wsgi.process_metrics()) elif name == 'request_exception': exception_info = kwargs['exception_info'] traceback.print_exception(*exception_info) elif name == 'process_stopping': print('SHUTDOWN', mod_wsgi.active_requests) print('EVENTS#ALL', mod_wsgi.event_callbacks) mod_wsgi.subscribe_events(event_handler) def shutdown_handler(event, **kwargs): print('SHUTDOWN-HANDLER', event, kwargs) print('EVENTS#SHUTDOWN', mod_wsgi.event_callbacks) mod_wsgi.subscribe_shutdown(shutdown_handler) print('CALLBACKS', mod_wsgi.event_callbacks) def atexit_handler(): print('ATEXIT-HANDLER') atexit.register(atexit_handler) def do_sleep(duration): time.sleep(duration) def application(environ, start_response): failure_mode = environ.get('HTTP_X_FAILURE_MODE', '') failure_mode = failure_mode.split() sleep_duration = 
environ.get('HTTP_X_SLEEP_DURATION', 0) sleep_duration = float(sleep_duration or 0) if 'application' in failure_mode: raise RuntimeError('application') status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) environ['wsgi.input'].read() if sleep_duration: do_sleep(sleep_duration) try: yield output if 'yield' in failure_mode: raise RuntimeError('yield') finally: if 'close' in failure_mode: raise RuntimeError('close') mod_wsgi-5.0.0/tests/hello.wsgi000066400000000000000000000004261452636074700165040ustar00rootroot00000000000000def application(environ, start_response): status = '200 OK' output = b'Hello World!' response_headers = [('Content-type', 'text/plain'), ('Content-Length', str(len(output)))] start_response(status, response_headers) return [output] mod_wsgi-5.0.0/tox.ini000066400000000000000000000002171452636074700146550ustar00rootroot00000000000000[tox] envlist = py38,py39,py310,py311,py312 [gh-actions] python = 3.8: py38 3.9: py39 3.10: py310 3.11: py311 3.12: py312